1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*******************************************************************************
3   * This file contains the iSCSI Target specific utility functions.
4   *
5   * (c) Copyright 2007-2013 Datera, Inc.
6   *
7   * Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
8   *
9   ******************************************************************************/
10  
11  #include <linux/list.h>
12  #include <linux/sched/signal.h>
13  #include <net/ipv6.h>         /* ipv6_addr_equal() */
14  #include <scsi/scsi_tcq.h>
15  #include <scsi/iscsi_proto.h>
16  #include <target/target_core_base.h>
17  #include <target/target_core_fabric.h>
18  #include <target/iscsi/iscsi_transport.h>
19  
20  #include <target/iscsi/iscsi_target_core.h>
21  #include "iscsi_target_parameters.h"
22  #include "iscsi_target_seq_pdu_list.h"
23  #include "iscsi_target_datain_values.h"
24  #include "iscsi_target_erl0.h"
25  #include "iscsi_target_erl1.h"
26  #include "iscsi_target_erl2.h"
27  #include "iscsi_target_tpg.h"
28  #include "iscsi_target_util.h"
29  #include "iscsi_target.h"
30  
31  extern struct list_head g_tiqn_list;
32  extern spinlock_t tiqn_lock;
33  
34  int iscsit_add_r2t_to_list(
35  	struct iscsit_cmd *cmd,
36  	u32 offset,
37  	u32 xfer_len,
38  	int recovery,
39  	u32 r2t_sn)
40  {
41  	struct iscsi_r2t *r2t;
42  
43  	lockdep_assert_held(&cmd->r2t_lock);
44  
45  	WARN_ON_ONCE((s32)xfer_len < 0);
46  
47  	r2t = kmem_cache_zalloc(lio_r2t_cache, GFP_ATOMIC);
48  	if (!r2t) {
49  		pr_err("Unable to allocate memory for struct iscsi_r2t.\n");
50  		return -1;
51  	}
52  	INIT_LIST_HEAD(&r2t->r2t_list);
53  
54  	r2t->recovery_r2t = recovery;
55  	r2t->r2t_sn = (!r2t_sn) ? cmd->r2t_sn++ : r2t_sn;
56  	r2t->offset = offset;
57  	r2t->xfer_len = xfer_len;
58  	list_add_tail(&r2t->r2t_list, &cmd->cmd_r2t_list);
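	/*
	 * The caller holds cmd->r2t_lock (asserted above).  Drop it around
	 * iscsit_add_cmd_to_immediate_queue() so the queueing path can take
	 * its own locks, then re-acquire it before returning.
	 */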
59  	spin_unlock_bh(&cmd->r2t_lock);
60  
61  	iscsit_add_cmd_to_immediate_queue(cmd, cmd->conn, ISTATE_SEND_R2T);
62  
63  	spin_lock_bh(&cmd->r2t_lock);
64  	return 0;
65  }
66  
67  struct iscsi_r2t *iscsit_get_r2t_for_eos(
68  	struct iscsit_cmd *cmd,
69  	u32 offset,
70  	u32 length)
71  {
72  	struct iscsi_r2t *r2t;
73  
74  	spin_lock_bh(&cmd->r2t_lock);
75  	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
76  		if ((r2t->offset <= offset) &&
77  		    (r2t->offset + r2t->xfer_len) >= (offset + length)) {
78  			spin_unlock_bh(&cmd->r2t_lock);
79  			return r2t;
80  		}
81  	}
82  	spin_unlock_bh(&cmd->r2t_lock);
83  
84  	pr_err("Unable to locate R2T for Offset: %u, Length:"
85  			" %u\n", offset, length);
86  	return NULL;
87  }
88  
89  struct iscsi_r2t *iscsit_get_r2t_from_list(struct iscsit_cmd *cmd)
90  {
91  	struct iscsi_r2t *r2t;
92  
93  	spin_lock_bh(&cmd->r2t_lock);
94  	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
95  		if (!r2t->sent_r2t) {
96  			spin_unlock_bh(&cmd->r2t_lock);
97  			return r2t;
98  		}
99  	}
100  	spin_unlock_bh(&cmd->r2t_lock);
101  
102  	pr_err("Unable to locate next R2T to send for ITT:"
103  			" 0x%08x.\n", cmd->init_task_tag);
104  	return NULL;
105  }
106  
107  void iscsit_free_r2t(struct iscsi_r2t *r2t, struct iscsit_cmd *cmd)
108  {
109  	lockdep_assert_held(&cmd->r2t_lock);
110  
111  	list_del(&r2t->r2t_list);
112  	kmem_cache_free(lio_r2t_cache, r2t);
113  }
114  
115  void iscsit_free_r2ts_from_list(struct iscsit_cmd *cmd)
116  {
117  	struct iscsi_r2t *r2t, *r2t_tmp;
118  
119  	spin_lock_bh(&cmd->r2t_lock);
120  	list_for_each_entry_safe(r2t, r2t_tmp, &cmd->cmd_r2t_list, r2t_list)
121  		iscsit_free_r2t(r2t, cmd);
122  	spin_unlock_bh(&cmd->r2t_lock);
123  }
124  
125  static int iscsit_wait_for_tag(struct se_session *se_sess, int state, int *cpup)
126  {
127  	int tag = -1;
128  	DEFINE_SBQ_WAIT(wait);
129  	struct sbq_wait_state *ws;
130  	struct sbitmap_queue *sbq;
131  
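	/*
	 * TASK_RUNNING callers (e.g. timer/softirq context) must not block,
	 * so fail the allocation immediately instead of waiting for a tag.
	 */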
132  	if (state == TASK_RUNNING)
133  		return tag;
134  
135  	sbq = &se_sess->sess_tag_pool;
136  	ws = &sbq->ws[0];
137  	for (;;) {
138  		sbitmap_prepare_to_wait(sbq, ws, &wait, state);
139  		if (signal_pending_state(state, current))
140  			break;
141  		tag = sbitmap_queue_get(sbq, cpup);
142  		if (tag >= 0)
143  			break;
144  		schedule();
145  	}
146  
147  	sbitmap_finish_wait(sbq, ws, &wait);
148  	return tag;
149  }
150  
151  /*
152   * May be called from software interrupt (timer) context for allocating
153   * iSCSI NopINs.
154   */
155  struct iscsit_cmd *iscsit_allocate_cmd(struct iscsit_conn *conn, int state)
156  {
157  	struct iscsit_cmd *cmd;
158  	struct se_session *se_sess = conn->sess->se_sess;
159  	int size, tag, cpu;
160  
161  	tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
162  	if (tag < 0)
163  		tag = iscsit_wait_for_tag(se_sess, state, &cpu);
164  	if (tag < 0)
165  		return NULL;
166  
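	/*
	 * Each tag indexes a fixed-size slot in the preallocated per-session
	 * command map: a struct iscsit_cmd followed by transport-private data.
	 */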
167  	size = sizeof(struct iscsit_cmd) + conn->conn_transport->priv_size;
168  	cmd = (struct iscsit_cmd *)(se_sess->sess_cmd_map + (tag * size));
169  	memset(cmd, 0, size);
170  
171  	cmd->se_cmd.map_tag = tag;
172  	cmd->se_cmd.map_cpu = cpu;
173  	cmd->conn = conn;
174  	cmd->data_direction = DMA_NONE;
175  	INIT_LIST_HEAD(&cmd->i_conn_node);
176  	INIT_LIST_HEAD(&cmd->datain_list);
177  	INIT_LIST_HEAD(&cmd->cmd_r2t_list);
178  	spin_lock_init(&cmd->datain_lock);
179  	spin_lock_init(&cmd->dataout_timeout_lock);
180  	spin_lock_init(&cmd->istate_lock);
181  	spin_lock_init(&cmd->error_lock);
182  	spin_lock_init(&cmd->r2t_lock);
183  	timer_setup(&cmd->dataout_timer, iscsit_handle_dataout_timeout, 0);
184  
185  	return cmd;
186  }
187  EXPORT_SYMBOL(iscsit_allocate_cmd);
188  
189  struct iscsi_seq *iscsit_get_seq_holder_for_datain(
190  	struct iscsit_cmd *cmd,
191  	u32 seq_send_order)
192  {
193  	u32 i;
194  
195  	for (i = 0; i < cmd->seq_count; i++)
196  		if (cmd->seq_list[i].seq_send_order == seq_send_order)
197  			return &cmd->seq_list[i];
198  
199  	return NULL;
200  }
201  
202  struct iscsi_seq *iscsit_get_seq_holder_for_r2t(struct iscsit_cmd *cmd)
203  {
204  	u32 i;
205  
206  	if (!cmd->seq_list) {
207  		pr_err("struct iscsit_cmd->seq_list is NULL!\n");
208  		return NULL;
209  	}
210  
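	/*
	 * Hand out SEQTYPE_NORMAL sequences in seq_send_order order,
	 * advancing the counter so the next R2T gets the following sequence.
	 */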
211  	for (i = 0; i < cmd->seq_count; i++) {
212  		if (cmd->seq_list[i].type != SEQTYPE_NORMAL)
213  			continue;
214  		if (cmd->seq_list[i].seq_send_order == cmd->seq_send_order) {
215  			cmd->seq_send_order++;
216  			return &cmd->seq_list[i];
217  		}
218  	}
219  
220  	return NULL;
221  }
222  
223  struct iscsi_r2t *iscsit_get_holder_for_r2tsn(
224  	struct iscsit_cmd *cmd,
225  	u32 r2t_sn)
226  {
227  	struct iscsi_r2t *r2t;
228  
229  	spin_lock_bh(&cmd->r2t_lock);
230  	list_for_each_entry(r2t, &cmd->cmd_r2t_list, r2t_list) {
231  		if (r2t->r2t_sn == r2t_sn) {
232  			spin_unlock_bh(&cmd->r2t_lock);
233  			return r2t;
234  		}
235  	}
236  	spin_unlock_bh(&cmd->r2t_lock);
237  
238  	return NULL;
239  }
240  
241  static inline int iscsit_check_received_cmdsn(struct iscsit_session *sess, u32 cmdsn)
242  {
243  	u32 max_cmdsn;
244  	int ret;
245  
246  	/*
247  	 * This is the proper method of checking received CmdSN against
248  	 * ExpCmdSN and MaxCmdSN values, as well as accounting for
249  	 * out-of-order CmdSNs due to multiple connection sessions and/or
250  	 * CRC failures.
251  	 */
252  	max_cmdsn = atomic_read(&sess->max_cmd_sn);
253  	if (iscsi_sna_gt(cmdsn, max_cmdsn)) {
254  		pr_err("Received CmdSN: 0x%08x is greater than"
255  		       " MaxCmdSN: 0x%08x, ignoring.\n", cmdsn, max_cmdsn);
256  		ret = CMDSN_MAXCMDSN_OVERRUN;
257  
258  	} else if (cmdsn == sess->exp_cmd_sn) {
259  		sess->exp_cmd_sn++;
260  		pr_debug("Received CmdSN matches ExpCmdSN,"
261  		      " incremented ExpCmdSN to: 0x%08x\n",
262  		      sess->exp_cmd_sn);
263  		ret = CMDSN_NORMAL_OPERATION;
264  
265  	} else if (iscsi_sna_gt(cmdsn, sess->exp_cmd_sn)) {
266  		pr_debug("Received CmdSN: 0x%08x is greater"
267  		      " than ExpCmdSN: 0x%08x, not acknowledging.\n",
268  		      cmdsn, sess->exp_cmd_sn);
269  		ret = CMDSN_HIGHER_THAN_EXP;
270  
271  	} else {
272  		pr_err("Received CmdSN: 0x%08x is less than"
273  		       " ExpCmdSN: 0x%08x, ignoring.\n", cmdsn,
274  		       sess->exp_cmd_sn);
275  		ret = CMDSN_LOWER_THAN_EXP;
276  	}
277  
278  	return ret;
279  }
280  
281  /*
282   * Commands may be received out of order if MC/S is in use.
283   * Ensure they are executed in CmdSN order.
284   */
285  int iscsit_sequence_cmd(struct iscsit_conn *conn, struct iscsit_cmd *cmd,
286  			unsigned char *buf, __be32 cmdsn)
287  {
288  	int ret, cmdsn_ret;
289  	bool reject = false;
290  	u8 reason = ISCSI_REASON_BOOKMARK_NO_RESOURCES;
291  
292  	mutex_lock(&conn->sess->cmdsn_mutex);
293  
294  	cmdsn_ret = iscsit_check_received_cmdsn(conn->sess, be32_to_cpu(cmdsn));
295  	switch (cmdsn_ret) {
296  	case CMDSN_NORMAL_OPERATION:
297  		ret = iscsit_execute_cmd(cmd, 0);
298  		if ((ret >= 0) && !list_empty(&conn->sess->sess_ooo_cmdsn_list))
299  			iscsit_execute_ooo_cmdsns(conn->sess);
300  		else if (ret < 0) {
301  			reject = true;
302  			ret = CMDSN_ERROR_CANNOT_RECOVER;
303  		}
304  		break;
305  	case CMDSN_HIGHER_THAN_EXP:
306  		ret = iscsit_handle_ooo_cmdsn(conn->sess, cmd, be32_to_cpu(cmdsn));
307  		if (ret < 0) {
308  			reject = true;
309  			ret = CMDSN_ERROR_CANNOT_RECOVER;
310  			break;
311  		}
312  		ret = CMDSN_HIGHER_THAN_EXP;
313  		break;
314  	case CMDSN_LOWER_THAN_EXP:
315  	case CMDSN_MAXCMDSN_OVERRUN:
316  	default:
317  		cmd->i_state = ISTATE_REMOVE;
318  		iscsit_add_cmd_to_immediate_queue(cmd, conn, cmd->i_state);
319  		/*
320  		 * Existing callers for iscsit_sequence_cmd() will silently
321  		 * ignore commands with CMDSN_LOWER_THAN_EXP, so force this
322  		 * return for CMDSN_MAXCMDSN_OVERRUN as well.
323  		 */
324  		ret = CMDSN_LOWER_THAN_EXP;
325  		break;
326  	}
327  	mutex_unlock(&conn->sess->cmdsn_mutex);
328  
329  	if (reject)
330  		iscsit_reject_cmd(cmd, reason, buf);
331  
332  	return ret;
333  }
334  EXPORT_SYMBOL(iscsit_sequence_cmd);
335  
336  int iscsit_check_unsolicited_dataout(struct iscsit_cmd *cmd, unsigned char *buf)
337  {
338  	struct iscsit_conn *conn = cmd->conn;
339  	struct se_cmd *se_cmd = &cmd->se_cmd;
340  	struct iscsi_data *hdr = (struct iscsi_data *) buf;
341  	u32 payload_length = ntoh24(hdr->dlength);
342  
343  	if (conn->sess->sess_ops->InitialR2T) {
344  		pr_err("Received unexpected unsolicited data"
345  			" while InitialR2T=Yes, protocol error.\n");
346  		transport_send_check_condition_and_sense(se_cmd,
347  				TCM_UNEXPECTED_UNSOLICITED_DATA, 0);
348  		return -1;
349  	}
350  
351  	if ((cmd->first_burst_len + payload_length) >
352  	     conn->sess->sess_ops->FirstBurstLength) {
353  		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
354  			" for this Unsolicited DataOut Burst.\n",
355  			(cmd->first_burst_len + payload_length),
356  				conn->sess->sess_ops->FirstBurstLength);
357  		transport_send_check_condition_and_sense(se_cmd,
358  				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
359  		return -1;
360  	}
361  
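	/*
	 * Nothing more to check until the initiator marks the unsolicited
	 * burst complete with the F bit.
	 */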
362  	if (!(hdr->flags & ISCSI_FLAG_CMD_FINAL))
363  		return 0;
364  
365  	if (((cmd->first_burst_len + payload_length) != cmd->se_cmd.data_length) &&
366  	    ((cmd->first_burst_len + payload_length) !=
367  	      conn->sess->sess_ops->FirstBurstLength)) {
368  		pr_err("Unsolicited non-immediate data received %u"
369  			" does not equal FirstBurstLength: %u, and does"
370  			" not equal ExpXferLen %u.\n",
371  			(cmd->first_burst_len + payload_length),
372  			conn->sess->sess_ops->FirstBurstLength, cmd->se_cmd.data_length);
373  		transport_send_check_condition_and_sense(se_cmd,
374  				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
375  		return -1;
376  	}
377  	return 0;
378  }
379  
380  struct iscsit_cmd *iscsit_find_cmd_from_itt(
381  	struct iscsit_conn *conn,
382  	itt_t init_task_tag)
383  {
384  	struct iscsit_cmd *cmd;
385  
386  	spin_lock_bh(&conn->cmd_lock);
387  	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
388  		if (cmd->init_task_tag == init_task_tag) {
389  			spin_unlock_bh(&conn->cmd_lock);
390  			return cmd;
391  		}
392  	}
393  	spin_unlock_bh(&conn->cmd_lock);
394  
395  	pr_err("Unable to locate ITT: 0x%08x on CID: %hu",
396  			init_task_tag, conn->cid);
397  	return NULL;
398  }
399  EXPORT_SYMBOL(iscsit_find_cmd_from_itt);
400  
401  struct iscsit_cmd *iscsit_find_cmd_from_itt_or_dump(
402  	struct iscsit_conn *conn,
403  	itt_t init_task_tag,
404  	u32 length)
405  {
406  	struct iscsit_cmd *cmd;
407  
408  	spin_lock_bh(&conn->cmd_lock);
409  	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
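		/*
		 * Skip commands that have already received their final
		 * DataOut; a reused ITT should not match a completed transfer.
		 */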
410  		if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT)
411  			continue;
412  		if (cmd->init_task_tag == init_task_tag) {
413  			spin_unlock_bh(&conn->cmd_lock);
414  			return cmd;
415  		}
416  	}
417  	spin_unlock_bh(&conn->cmd_lock);
418  
419  	pr_err("Unable to locate ITT: 0x%08x on CID: %hu,"
420  			" dumping payload\n", init_task_tag, conn->cid);
421  	if (length)
422  		iscsit_dump_data_payload(conn, length, 1);
423  
424  	return NULL;
425  }
426  EXPORT_SYMBOL(iscsit_find_cmd_from_itt_or_dump);
427  
428  struct iscsit_cmd *iscsit_find_cmd_from_ttt(
429  	struct iscsit_conn *conn,
430  	u32 targ_xfer_tag)
431  {
432  	struct iscsit_cmd *cmd = NULL;
433  
434  	spin_lock_bh(&conn->cmd_lock);
435  	list_for_each_entry(cmd, &conn->conn_cmd_list, i_conn_node) {
436  		if (cmd->targ_xfer_tag == targ_xfer_tag) {
437  			spin_unlock_bh(&conn->cmd_lock);
438  			return cmd;
439  		}
440  	}
441  	spin_unlock_bh(&conn->cmd_lock);
442  
443  	pr_err("Unable to locate TTT: 0x%08x on CID: %hu\n",
444  			targ_xfer_tag, conn->cid);
445  	return NULL;
446  }
447  
448  int iscsit_find_cmd_for_recovery(
449  	struct iscsit_session *sess,
450  	struct iscsit_cmd **cmd_ptr,
451  	struct iscsi_conn_recovery **cr_ptr,
452  	itt_t init_task_tag)
453  {
454  	struct iscsit_cmd *cmd = NULL;
455  	struct iscsi_conn_recovery *cr;
456  	/*
457  	 * Scan through the inactive connection recovery list's command list.
458  	 * If init_task_tag matches, the command is still allegiant.
459  	 */
460  	spin_lock(&sess->cr_i_lock);
461  	list_for_each_entry(cr, &sess->cr_inactive_list, cr_list) {
462  		spin_lock(&cr->conn_recovery_cmd_lock);
463  		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
464  			if (cmd->init_task_tag == init_task_tag) {
465  				spin_unlock(&cr->conn_recovery_cmd_lock);
466  				spin_unlock(&sess->cr_i_lock);
467  
468  				*cr_ptr = cr;
469  				*cmd_ptr = cmd;
470  				return -2;
471  			}
472  		}
473  		spin_unlock(&cr->conn_recovery_cmd_lock);
474  	}
475  	spin_unlock(&sess->cr_i_lock);
476  	/*
477  	 * Scan through the active connection recovery list's command list.
478  	 * If init_task_tag matches, the command is ready to be reassigned.
479  	 */
480  	spin_lock(&sess->cr_a_lock);
481  	list_for_each_entry(cr, &sess->cr_active_list, cr_list) {
482  		spin_lock(&cr->conn_recovery_cmd_lock);
483  		list_for_each_entry(cmd, &cr->conn_recovery_cmd_list, i_conn_node) {
484  			if (cmd->init_task_tag == init_task_tag) {
485  				spin_unlock(&cr->conn_recovery_cmd_lock);
486  				spin_unlock(&sess->cr_a_lock);
487  
488  				*cr_ptr = cr;
489  				*cmd_ptr = cmd;
490  				return 0;
491  			}
492  		}
493  		spin_unlock(&cr->conn_recovery_cmd_lock);
494  	}
495  	spin_unlock(&sess->cr_a_lock);
496  
497  	return -1;
498  }
499  
500  void iscsit_add_cmd_to_immediate_queue(
501  	struct iscsit_cmd *cmd,
502  	struct iscsit_conn *conn,
503  	u8 state)
504  {
505  	struct iscsi_queue_req *qr;
506  
507  	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
508  	if (!qr) {
509  		pr_err("Unable to allocate memory for"
510  				" struct iscsi_queue_req\n");
511  		return;
512  	}
513  	INIT_LIST_HEAD(&qr->qr_list);
514  	qr->cmd = cmd;
515  	qr->state = state;
516  
517  	spin_lock_bh(&conn->immed_queue_lock);
518  	list_add_tail(&qr->qr_list, &conn->immed_queue_list);
519  	atomic_inc(&cmd->immed_queue_count);
520  	atomic_set(&conn->check_immediate_queue, 1);
521  	spin_unlock_bh(&conn->immed_queue_lock);
522  
523  	wake_up(&conn->queues_wq);
524  }
525  EXPORT_SYMBOL(iscsit_add_cmd_to_immediate_queue);
526  
527  struct iscsi_queue_req *iscsit_get_cmd_from_immediate_queue(struct iscsit_conn *conn)
528  {
529  	struct iscsi_queue_req *qr;
530  
531  	spin_lock_bh(&conn->immed_queue_lock);
532  	if (list_empty(&conn->immed_queue_list)) {
533  		spin_unlock_bh(&conn->immed_queue_lock);
534  		return NULL;
535  	}
536  	qr = list_first_entry(&conn->immed_queue_list,
537  			      struct iscsi_queue_req, qr_list);
538  
539  	list_del(&qr->qr_list);
540  	if (qr->cmd)
541  		atomic_dec(&qr->cmd->immed_queue_count);
542  	spin_unlock_bh(&conn->immed_queue_lock);
543  
544  	return qr;
545  }
546  
547  static void iscsit_remove_cmd_from_immediate_queue(
548  	struct iscsit_cmd *cmd,
549  	struct iscsit_conn *conn)
550  {
551  	struct iscsi_queue_req *qr, *qr_tmp;
552  
553  	spin_lock_bh(&conn->immed_queue_lock);
554  	if (!atomic_read(&cmd->immed_queue_count)) {
555  		spin_unlock_bh(&conn->immed_queue_lock);
556  		return;
557  	}
558  
559  	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
560  		if (qr->cmd != cmd)
561  			continue;
562  
563  		atomic_dec(&qr->cmd->immed_queue_count);
564  		list_del(&qr->qr_list);
565  		kmem_cache_free(lio_qr_cache, qr);
566  	}
567  	spin_unlock_bh(&conn->immed_queue_lock);
568  
569  	if (atomic_read(&cmd->immed_queue_count)) {
570  		pr_err("ITT: 0x%08x immed_queue_count: %d\n",
571  			cmd->init_task_tag,
572  			atomic_read(&cmd->immed_queue_count));
573  	}
574  }
575  
576  int iscsit_add_cmd_to_response_queue(
577  	struct iscsit_cmd *cmd,
578  	struct iscsit_conn *conn,
579  	u8 state)
580  {
581  	struct iscsi_queue_req *qr;
582  
583  	qr = kmem_cache_zalloc(lio_qr_cache, GFP_ATOMIC);
584  	if (!qr) {
585  		pr_err("Unable to allocate memory for"
586  			" struct iscsi_queue_req\n");
587  		return -ENOMEM;
588  	}
589  	INIT_LIST_HEAD(&qr->qr_list);
590  	qr->cmd = cmd;
591  	qr->state = state;
592  
593  	spin_lock_bh(&conn->response_queue_lock);
594  	list_add_tail(&qr->qr_list, &conn->response_queue_list);
595  	atomic_inc(&cmd->response_queue_count);
596  	spin_unlock_bh(&conn->response_queue_lock);
597  
598  	wake_up(&conn->queues_wq);
599  	return 0;
600  }
601  
602  struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsit_conn *conn)
603  {
604  	struct iscsi_queue_req *qr;
605  
606  	spin_lock_bh(&conn->response_queue_lock);
607  	if (list_empty(&conn->response_queue_list)) {
608  		spin_unlock_bh(&conn->response_queue_lock);
609  		return NULL;
610  	}
611  
612  	qr = list_first_entry(&conn->response_queue_list,
613  			      struct iscsi_queue_req, qr_list);
614  
615  	list_del(&qr->qr_list);
616  	if (qr->cmd)
617  		atomic_dec(&qr->cmd->response_queue_count);
618  	spin_unlock_bh(&conn->response_queue_lock);
619  
620  	return qr;
621  }
622  
623  static void iscsit_remove_cmd_from_response_queue(
624  	struct iscsit_cmd *cmd,
625  	struct iscsit_conn *conn)
626  {
627  	struct iscsi_queue_req *qr, *qr_tmp;
628  
629  	spin_lock_bh(&conn->response_queue_lock);
630  	if (!atomic_read(&cmd->response_queue_count)) {
631  		spin_unlock_bh(&conn->response_queue_lock);
632  		return;
633  	}
634  
635  	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
636  				qr_list) {
637  		if (qr->cmd != cmd)
638  			continue;
639  
640  		atomic_dec(&qr->cmd->response_queue_count);
641  		list_del(&qr->qr_list);
642  		kmem_cache_free(lio_qr_cache, qr);
643  	}
644  	spin_unlock_bh(&conn->response_queue_lock);
645  
646  	if (atomic_read(&cmd->response_queue_count)) {
647  		pr_err("ITT: 0x%08x response_queue_count: %d\n",
648  			cmd->init_task_tag,
649  			atomic_read(&cmd->response_queue_count));
650  	}
651  }
652  
653  bool iscsit_conn_all_queues_empty(struct iscsit_conn *conn)
654  {
655  	bool empty;
656  
657  	spin_lock_bh(&conn->immed_queue_lock);
658  	empty = list_empty(&conn->immed_queue_list);
659  	spin_unlock_bh(&conn->immed_queue_lock);
660  
661  	if (!empty)
662  		return empty;
663  
664  	spin_lock_bh(&conn->response_queue_lock);
665  	empty = list_empty(&conn->response_queue_list);
666  	spin_unlock_bh(&conn->response_queue_lock);
667  
668  	return empty;
669  }
670  
671  void iscsit_free_queue_reqs_for_conn(struct iscsit_conn *conn)
672  {
673  	struct iscsi_queue_req *qr, *qr_tmp;
674  
675  	spin_lock_bh(&conn->immed_queue_lock);
676  	list_for_each_entry_safe(qr, qr_tmp, &conn->immed_queue_list, qr_list) {
677  		list_del(&qr->qr_list);
678  		if (qr->cmd)
679  			atomic_dec(&qr->cmd->immed_queue_count);
680  
681  		kmem_cache_free(lio_qr_cache, qr);
682  	}
683  	spin_unlock_bh(&conn->immed_queue_lock);
684  
685  	spin_lock_bh(&conn->response_queue_lock);
686  	list_for_each_entry_safe(qr, qr_tmp, &conn->response_queue_list,
687  			qr_list) {
688  		list_del(&qr->qr_list);
689  		if (qr->cmd)
690  			atomic_dec(&qr->cmd->response_queue_count);
691  
692  		kmem_cache_free(lio_qr_cache, qr);
693  	}
694  	spin_unlock_bh(&conn->response_queue_lock);
695  }
696  
697  void iscsit_release_cmd(struct iscsit_cmd *cmd)
698  {
699  	struct iscsit_session *sess;
700  	struct se_cmd *se_cmd = &cmd->se_cmd;
701  
702  	WARN_ON(!list_empty(&cmd->i_conn_node));
703  
704  	if (cmd->conn)
705  		sess = cmd->conn->sess;
706  	else
707  		sess = cmd->sess;
708  
709  	BUG_ON(!sess || !sess->se_sess);
710  
711  	kfree(cmd->buf_ptr);
712  	kfree(cmd->pdu_list);
713  	kfree(cmd->seq_list);
714  	kfree(cmd->tmr_req);
715  	kfree(cmd->overflow_buf);
716  	kfree(cmd->iov_data);
717  	kfree(cmd->text_in_ptr);
718  
719  	target_free_tag(sess->se_sess, se_cmd);
720  }
721  EXPORT_SYMBOL(iscsit_release_cmd);
722  
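/*
 * Tear down per-command I/O state (DataOut timer, R2Ts, DataIN requests)
 * and, when check_queues is set, purge any immediate/response queue entries
 * still referencing the command on its connection.
 */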
723  void __iscsit_free_cmd(struct iscsit_cmd *cmd, bool check_queues)
724  {
725  	struct iscsit_conn *conn = cmd->conn;
726  
727  	WARN_ON(!list_empty(&cmd->i_conn_node));
728  
729  	if (cmd->data_direction == DMA_TO_DEVICE) {
730  		iscsit_stop_dataout_timer(cmd);
731  		iscsit_free_r2ts_from_list(cmd);
732  	}
733  	if (cmd->data_direction == DMA_FROM_DEVICE)
734  		iscsit_free_all_datain_reqs(cmd);
735  
736  	if (conn && check_queues) {
737  		iscsit_remove_cmd_from_immediate_queue(cmd, conn);
738  		iscsit_remove_cmd_from_response_queue(cmd, conn);
739  	}
740  
741  	if (conn && conn->conn_transport->iscsit_unmap_cmd)
742  		conn->conn_transport->iscsit_unmap_cmd(conn, cmd);
743  }
744  
745  void iscsit_free_cmd(struct iscsit_cmd *cmd, bool shutdown)
746  {
747  	struct se_cmd *se_cmd = cmd->se_cmd.se_tfo ? &cmd->se_cmd : NULL;
748  	int rc;
749  
750  	WARN_ON(!list_empty(&cmd->i_conn_node));
751  
752  	__iscsit_free_cmd(cmd, shutdown);
753  	if (se_cmd) {
754  		rc = transport_generic_free_cmd(se_cmd, shutdown);
755  		if (!rc && shutdown && se_cmd->se_sess) {
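			/*
			 * transport_generic_free_cmd() did not drop the last
			 * reference; during connection shutdown, drain the
			 * queues again and put the remaining reference here.
			 */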
756  			__iscsit_free_cmd(cmd, shutdown);
757  			target_put_sess_cmd(se_cmd);
758  		}
759  	} else {
760  		iscsit_release_cmd(cmd);
761  	}
762  }
763  EXPORT_SYMBOL(iscsit_free_cmd);
764  
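/*
 * Returns true when the session is still in use and the caller may not
 * sleep; otherwise waits (if allowed) for the usage count to reach zero
 * and returns false.
 */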
765  bool iscsit_check_session_usage_count(struct iscsit_session *sess,
766  				      bool can_sleep)
767  {
768  	spin_lock_bh(&sess->session_usage_lock);
769  	if (sess->session_usage_count != 0) {
770  		sess->session_waiting_on_uc = 1;
771  		spin_unlock_bh(&sess->session_usage_lock);
772  		if (!can_sleep)
773  			return true;
774  
775  		wait_for_completion(&sess->session_waiting_on_uc_comp);
776  		return false;
777  	}
778  	spin_unlock_bh(&sess->session_usage_lock);
779  
780  	return false;
781  }
782  
783  void iscsit_dec_session_usage_count(struct iscsit_session *sess)
784  {
785  	spin_lock_bh(&sess->session_usage_lock);
786  	sess->session_usage_count--;
787  
788  	if (!sess->session_usage_count && sess->session_waiting_on_uc)
789  		complete(&sess->session_waiting_on_uc_comp);
790  
791  	spin_unlock_bh(&sess->session_usage_lock);
792  }
793  
794  void iscsit_inc_session_usage_count(struct iscsit_session *sess)
795  {
796  	spin_lock_bh(&sess->session_usage_lock);
797  	sess->session_usage_count++;
798  	spin_unlock_bh(&sess->session_usage_lock);
799  }
800  
801  struct iscsit_conn *iscsit_get_conn_from_cid(struct iscsit_session *sess, u16 cid)
802  {
803  	struct iscsit_conn *conn;
804  
805  	spin_lock_bh(&sess->conn_lock);
806  	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
807  		if ((conn->cid == cid) &&
808  		    (conn->conn_state == TARG_CONN_STATE_LOGGED_IN)) {
809  			iscsit_inc_conn_usage_count(conn);
810  			spin_unlock_bh(&sess->conn_lock);
811  			return conn;
812  		}
813  	}
814  	spin_unlock_bh(&sess->conn_lock);
815  
816  	return NULL;
817  }
818  
819  struct iscsit_conn *iscsit_get_conn_from_cid_rcfr(struct iscsit_session *sess, u16 cid)
820  {
821  	struct iscsit_conn *conn;
822  
823  	spin_lock_bh(&sess->conn_lock);
824  	list_for_each_entry(conn, &sess->sess_conn_list, conn_list) {
825  		if (conn->cid == cid) {
826  			iscsit_inc_conn_usage_count(conn);
827  			spin_lock(&conn->state_lock);
828  			atomic_set(&conn->connection_wait_rcfr, 1);
829  			spin_unlock(&conn->state_lock);
830  			spin_unlock_bh(&sess->conn_lock);
831  			return conn;
832  		}
833  	}
834  	spin_unlock_bh(&sess->conn_lock);
835  
836  	return NULL;
837  }
838  
839  void iscsit_check_conn_usage_count(struct iscsit_conn *conn)
840  {
841  	spin_lock_bh(&conn->conn_usage_lock);
842  	if (conn->conn_usage_count != 0) {
843  		conn->conn_waiting_on_uc = 1;
844  		spin_unlock_bh(&conn->conn_usage_lock);
845  
846  		wait_for_completion(&conn->conn_waiting_on_uc_comp);
847  		return;
848  	}
849  	spin_unlock_bh(&conn->conn_usage_lock);
850  }
851  
852  void iscsit_dec_conn_usage_count(struct iscsit_conn *conn)
853  {
854  	spin_lock_bh(&conn->conn_usage_lock);
855  	conn->conn_usage_count--;
856  
857  	if (!conn->conn_usage_count && conn->conn_waiting_on_uc)
858  		complete(&conn->conn_waiting_on_uc_comp);
859  
860  	spin_unlock_bh(&conn->conn_usage_lock);
861  }
862  
863  void iscsit_inc_conn_usage_count(struct iscsit_conn *conn)
864  {
865  	spin_lock_bh(&conn->conn_usage_lock);
866  	conn->conn_usage_count++;
867  	spin_unlock_bh(&conn->conn_usage_lock);
868  }
869  
870  static int iscsit_add_nopin(struct iscsit_conn *conn, int want_response)
871  {
872  	u8 state;
873  	struct iscsit_cmd *cmd;
874  
875  	cmd = iscsit_allocate_cmd(conn, TASK_RUNNING);
876  	if (!cmd)
877  		return -1;
878  
879  	cmd->iscsi_opcode = ISCSI_OP_NOOP_IN;
880  	state = (want_response) ? ISTATE_SEND_NOPIN_WANT_RESPONSE :
881  				ISTATE_SEND_NOPIN_NO_RESPONSE;
882  	cmd->init_task_tag = RESERVED_ITT;
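	/*
	 * A NopIN soliciting a NopOut response carries a real Target Transfer
	 * Tag; an unsolicited NopIN uses the reserved TTT 0xFFFFFFFF.
	 */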
883  	cmd->targ_xfer_tag = (want_response) ?
884  			     session_get_next_ttt(conn->sess) : 0xFFFFFFFF;
885  	spin_lock_bh(&conn->cmd_lock);
886  	list_add_tail(&cmd->i_conn_node, &conn->conn_cmd_list);
887  	spin_unlock_bh(&conn->cmd_lock);
888  
889  	if (want_response)
890  		iscsit_start_nopin_response_timer(conn);
891  	iscsit_add_cmd_to_immediate_queue(cmd, conn, state);
892  
893  	return 0;
894  }
895  
896  void iscsit_handle_nopin_response_timeout(struct timer_list *t)
897  {
898  	struct iscsit_conn *conn = from_timer(conn, t, nopin_response_timer);
899  	struct iscsit_session *sess = conn->sess;
900  
901  	iscsit_inc_conn_usage_count(conn);
902  
903  	spin_lock_bh(&conn->nopin_timer_lock);
904  	if (conn->nopin_response_timer_flags & ISCSI_TF_STOP) {
905  		spin_unlock_bh(&conn->nopin_timer_lock);
906  		iscsit_dec_conn_usage_count(conn);
907  		return;
908  	}
909  
910  	pr_err("Did not receive response to NOPIN on CID: %hu, failing"
911  		" connection for I_T Nexus %s,i,0x%6phN,%s,t,0x%02x\n",
912  		conn->cid, sess->sess_ops->InitiatorName, sess->isid,
913  		sess->tpg->tpg_tiqn->tiqn, (u32)sess->tpg->tpgt);
914  	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
915  	spin_unlock_bh(&conn->nopin_timer_lock);
916  
917  	iscsit_fill_cxn_timeout_err_stats(sess);
918  	iscsit_cause_connection_reinstatement(conn, 0);
919  	iscsit_dec_conn_usage_count(conn);
920  }
921  
922  void iscsit_mod_nopin_response_timer(struct iscsit_conn *conn)
923  {
924  	struct iscsit_session *sess = conn->sess;
925  	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
926  
927  	spin_lock_bh(&conn->nopin_timer_lock);
928  	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
929  		spin_unlock_bh(&conn->nopin_timer_lock);
930  		return;
931  	}
932  
933  	mod_timer(&conn->nopin_response_timer,
934  		(get_jiffies_64() + na->nopin_response_timeout * HZ));
935  	spin_unlock_bh(&conn->nopin_timer_lock);
936  }
937  
938  void iscsit_start_nopin_response_timer(struct iscsit_conn *conn)
939  {
940  	struct iscsit_session *sess = conn->sess;
941  	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
942  
943  	spin_lock_bh(&conn->nopin_timer_lock);
944  	if (conn->nopin_response_timer_flags & ISCSI_TF_RUNNING) {
945  		spin_unlock_bh(&conn->nopin_timer_lock);
946  		return;
947  	}
948  
949  	conn->nopin_response_timer_flags &= ~ISCSI_TF_STOP;
950  	conn->nopin_response_timer_flags |= ISCSI_TF_RUNNING;
951  	mod_timer(&conn->nopin_response_timer,
952  		  jiffies + na->nopin_response_timeout * HZ);
953  
954  	pr_debug("Started NOPIN Response Timer on CID: %d to %u"
955  		" seconds\n", conn->cid, na->nopin_response_timeout);
956  	spin_unlock_bh(&conn->nopin_timer_lock);
957  }
958  
959  void iscsit_stop_nopin_response_timer(struct iscsit_conn *conn)
960  {
961  	spin_lock_bh(&conn->nopin_timer_lock);
962  	if (!(conn->nopin_response_timer_flags & ISCSI_TF_RUNNING)) {
963  		spin_unlock_bh(&conn->nopin_timer_lock);
964  		return;
965  	}
966  	conn->nopin_response_timer_flags |= ISCSI_TF_STOP;
967  	spin_unlock_bh(&conn->nopin_timer_lock);
968  
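	/*
	 * ISCSI_TF_STOP is set under nopin_timer_lock so a handler that has
	 * already fired bails out early; del_timer_sync() then waits for any
	 * in-flight handler to finish.
	 */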
969  	del_timer_sync(&conn->nopin_response_timer);
970  
971  	spin_lock_bh(&conn->nopin_timer_lock);
972  	conn->nopin_response_timer_flags &= ~ISCSI_TF_RUNNING;
973  	spin_unlock_bh(&conn->nopin_timer_lock);
974  }
975  
976  void iscsit_handle_nopin_timeout(struct timer_list *t)
977  {
978  	struct iscsit_conn *conn = from_timer(conn, t, nopin_timer);
979  
980  	iscsit_inc_conn_usage_count(conn);
981  
982  	spin_lock_bh(&conn->nopin_timer_lock);
983  	if (conn->nopin_timer_flags & ISCSI_TF_STOP) {
984  		spin_unlock_bh(&conn->nopin_timer_lock);
985  		iscsit_dec_conn_usage_count(conn);
986  		return;
987  	}
988  	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
989  	spin_unlock_bh(&conn->nopin_timer_lock);
990  
991  	iscsit_add_nopin(conn, 1);
992  	iscsit_dec_conn_usage_count(conn);
993  }
994  
995  void __iscsit_start_nopin_timer(struct iscsit_conn *conn)
996  {
997  	struct iscsit_session *sess = conn->sess;
998  	struct iscsi_node_attrib *na = iscsit_tpg_get_node_attrib(sess);
999  
1000  	lockdep_assert_held(&conn->nopin_timer_lock);
1001  
1002  	/*
1003  	 * NOPIN timeout is disabled.
1004  	 */
1005  	if (!na->nopin_timeout)
1006  		return;
1007  
1008  	if (conn->nopin_timer_flags & ISCSI_TF_RUNNING)
1009  		return;
1010  
1011  	conn->nopin_timer_flags &= ~ISCSI_TF_STOP;
1012  	conn->nopin_timer_flags |= ISCSI_TF_RUNNING;
1013  	mod_timer(&conn->nopin_timer, jiffies + na->nopin_timeout * HZ);
1014  
1015  	pr_debug("Started NOPIN Timer on CID: %d at %u second"
1016  		" interval\n", conn->cid, na->nopin_timeout);
1017  }
1018  
1019  void iscsit_start_nopin_timer(struct iscsit_conn *conn)
1020  {
1021  	spin_lock_bh(&conn->nopin_timer_lock);
1022  	__iscsit_start_nopin_timer(conn);
1023  	spin_unlock_bh(&conn->nopin_timer_lock);
1024  }
1025  
1026  void iscsit_stop_nopin_timer(struct iscsit_conn *conn)
1027  {
1028  	spin_lock_bh(&conn->nopin_timer_lock);
1029  	if (!(conn->nopin_timer_flags & ISCSI_TF_RUNNING)) {
1030  		spin_unlock_bh(&conn->nopin_timer_lock);
1031  		return;
1032  	}
1033  	conn->nopin_timer_flags |= ISCSI_TF_STOP;
1034  	spin_unlock_bh(&conn->nopin_timer_lock);
1035  
1036  	del_timer_sync(&conn->nopin_timer);
1037  
1038  	spin_lock_bh(&conn->nopin_timer_lock);
1039  	conn->nopin_timer_flags &= ~ISCSI_TF_RUNNING;
1040  	spin_unlock_bh(&conn->nopin_timer_lock);
1041  }
1042  
1043  void iscsit_login_timeout(struct timer_list *t)
1044  {
1045  	struct iscsit_conn *conn = from_timer(conn, t, login_timer);
1046  	struct iscsi_login *login = conn->login;
1047  
1048  	pr_debug("Entering iscsi_target_login_timeout >>>>>>>>>>>>>>>>>>>\n");
1049  
1050  	spin_lock_bh(&conn->login_timer_lock);
1051  	login->login_failed = 1;
1052  
1053  	if (conn->login_kworker) {
1054  		pr_debug("Sending SIGINT to conn->login_kworker %s/%d\n",
1055  			 conn->login_kworker->comm, conn->login_kworker->pid);
1056  		send_sig(SIGINT, conn->login_kworker, 1);
1057  	} else {
1058  		schedule_delayed_work(&conn->login_work, 0);
1059  	}
1060  	spin_unlock_bh(&conn->login_timer_lock);
1061  }
1062  
1063  void iscsit_start_login_timer(struct iscsit_conn *conn, struct task_struct *kthr)
1064  {
1065  	pr_debug("Login timer started\n");
1066  
1067  	conn->login_kworker = kthr;
1068  	mod_timer(&conn->login_timer, jiffies + TA_LOGIN_TIMEOUT * HZ);
1069  }
1070  
1071  int iscsit_set_login_timer_kworker(struct iscsit_conn *conn, struct task_struct *kthr)
1072  {
1073  	struct iscsi_login *login = conn->login;
1074  	int ret = 0;
1075  
1076  	spin_lock_bh(&conn->login_timer_lock);
1077  	if (login->login_failed) {
1078  		/* The timer has already expired */
1079  		ret = -1;
1080  	} else {
1081  		conn->login_kworker = kthr;
1082  	}
1083  	spin_unlock_bh(&conn->login_timer_lock);
1084  
1085  	return ret;
1086  }
1087  
1088  void iscsit_stop_login_timer(struct iscsit_conn *conn)
1089  {
1090  	pr_debug("Login timer stopped\n");
1091  	timer_delete_sync(&conn->login_timer);
1092  }
1093  
1094  int iscsit_send_tx_data(
1095  	struct iscsit_cmd *cmd,
1096  	struct iscsit_conn *conn,
1097  	int use_misc)
1098  {
1099  	int tx_sent, tx_size;
1100  	u32 iov_count;
1101  	struct kvec *iov;
1102  
1103  send_data:
1104  	tx_size = cmd->tx_size;
1105  
1106  	if (!use_misc) {
1107  		iov = &cmd->iov_data[0];
1108  		iov_count = cmd->iov_data_count;
1109  	} else {
1110  		iov = &cmd->iov_misc[0];
1111  		iov_count = cmd->iov_misc_count;
1112  	}
1113  
1114  	tx_sent = tx_data(conn, &iov[0], iov_count, tx_size);
1115  	if (tx_size != tx_sent) {
1116  		if (tx_sent == -EAGAIN) {
1117  			pr_err("tx_data() returned -EAGAIN\n");
1118  			goto send_data;
1119  		} else
1120  			return -1;
1121  	}
1122  	cmd->tx_size = 0;
1123  
1124  	return 0;
1125  }
1126  
1127  int iscsit_fe_sendpage_sg(
1128  	struct iscsit_cmd *cmd,
1129  	struct iscsit_conn *conn)
1130  {
1131  	struct scatterlist *sg = cmd->first_data_sg;
1132  	struct bio_vec bvec;
1133  	struct msghdr msghdr = { .msg_flags = MSG_SPLICE_PAGES, };
1134  	struct kvec iov;
1135  	u32 tx_hdr_size, data_len;
1136  	u32 offset = cmd->first_data_sg_off;
1137  	int tx_sent, iov_off;
1138  
1139  send_hdr:
1140  	tx_hdr_size = ISCSI_HDR_LEN;
1141  	if (conn->conn_ops->HeaderDigest)
1142  		tx_hdr_size += ISCSI_CRC_LEN;
1143  
1144  	iov.iov_base = cmd->pdu;
1145  	iov.iov_len = tx_hdr_size;
1146  
1147  	tx_sent = tx_data(conn, &iov, 1, tx_hdr_size);
1148  	if (tx_hdr_size != tx_sent) {
1149  		if (tx_sent == -EAGAIN) {
1150  			pr_err("tx_data() returned -EAGAIN\n");
1151  			goto send_hdr;
1152  		}
1153  		return -1;
1154  	}
1155  
1156  	data_len = cmd->tx_size - tx_hdr_size - cmd->padding;
1157  	/*
1158  	 * Set iov_off used by padding and data digest tx_data() calls below
1159  	 * in order to determine proper offset into cmd->iov_data[]
1160  	 */
1161  	if (conn->conn_ops->DataDigest) {
1162  		data_len -= ISCSI_CRC_LEN;
1163  		if (cmd->padding)
1164  			iov_off = (cmd->iov_data_count - 2);
1165  		else
1166  			iov_off = (cmd->iov_data_count - 1);
1167  	} else {
1168  		iov_off = (cmd->iov_data_count - 1);
1169  	}
1170  	/*
1171  	 * Perform sendpage() for each page in the scatterlist
1172  	 */
1173  	while (data_len) {
1174  		u32 space = (sg->length - offset);
1175  		u32 sub_len = min_t(u32, data_len, space);
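		/*
		 * MSG_SPLICE_PAGES lets sendmsg() transmit the scatterlist
		 * page directly instead of copying it into the socket buffer.
		 */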
1176  send_pg:
1177  		bvec_set_page(&bvec, sg_page(sg), sub_len, sg->offset + offset);
1178  		iov_iter_bvec(&msghdr.msg_iter, ITER_SOURCE, &bvec, 1, sub_len);
1179  
1180  		tx_sent = conn->sock->ops->sendmsg(conn->sock, &msghdr,
1181  						   sub_len);
1182  		if (tx_sent != sub_len) {
1183  			if (tx_sent == -EAGAIN) {
1184  				pr_err("sendmsg/splice returned -EAGAIN\n");
1185  				goto send_pg;
1186  			}
1187  
1188  			pr_err("sendmsg/splice failure: %d\n", tx_sent);
1189  			return -1;
1190  		}
1191  
1192  		data_len -= sub_len;
1193  		offset = 0;
1194  		sg = sg_next(sg);
1195  	}
1196  
1197  send_padding:
1198  	if (cmd->padding) {
1199  		struct kvec *iov_p = &cmd->iov_data[iov_off++];
1200  
1201  		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);
1202  		if (cmd->padding != tx_sent) {
1203  			if (tx_sent == -EAGAIN) {
1204  				pr_err("tx_data() returned -EAGAIN\n");
1205  				goto send_padding;
1206  			}
1207  			return -1;
1208  		}
1209  	}
1210  
1211  send_datacrc:
1212  	if (conn->conn_ops->DataDigest) {
1213  		struct kvec *iov_d = &cmd->iov_data[iov_off];
1214  
1215  		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);
1216  		if (ISCSI_CRC_LEN != tx_sent) {
1217  			if (tx_sent == -EAGAIN) {
1218  				pr_err("tx_data() returned -EAGAIN\n");
1219  				goto send_datacrc;
1220  			}
1221  			return -1;
1222  		}
1223  	}
1224  
1225  	return 0;
1226  }
1227  
1228  /*
1229   *      This function is mainly used for sending an ISCSI_TARG_LOGIN_RSP PDU
1230   *      back to the Initiator when an exception condition occurs, with the
1231   *      errors set in status_class and status_detail.
1232   *
1233   *      Parameters:     iSCSI Connection, Status Class, Status Detail.
1234   *      Returns:        0 on success, -1 on error.
1235   */
1236  int iscsit_tx_login_rsp(struct iscsit_conn *conn, u8 status_class, u8 status_detail)
1237  {
1238  	struct iscsi_login_rsp *hdr;
1239  	struct iscsi_login *login = conn->conn_login;
1240  
1241  	login->login_failed = 1;
1242  	iscsit_collect_login_stats(conn, status_class, status_detail);
1243  
1244  	memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1245  
1246  	hdr	= (struct iscsi_login_rsp *)&login->rsp[0];
1247  	hdr->opcode		= ISCSI_OP_LOGIN_RSP;
1248  	hdr->status_class	= status_class;
1249  	hdr->status_detail	= status_detail;
1250  	hdr->itt		= conn->login_itt;
1251  
1252  	return conn->conn_transport->iscsit_put_login_tx(conn, login, 0);
1253  }
1254  
1255  void iscsit_print_session_params(struct iscsit_session *sess)
1256  {
1257  	struct iscsit_conn *conn;
1258  
1259  	pr_debug("-----------------------------[Session Params for"
1260  		" SID: %u]-----------------------------\n", sess->sid);
1261  	spin_lock_bh(&sess->conn_lock);
1262  	list_for_each_entry(conn, &sess->sess_conn_list, conn_list)
1263  		iscsi_dump_conn_ops(conn->conn_ops);
1264  	spin_unlock_bh(&sess->conn_lock);
1265  
1266  	iscsi_dump_sess_ops(sess->sess_ops);
1267  }
1268  
1269  int rx_data(
1270  	struct iscsit_conn *conn,
1271  	struct kvec *iov,
1272  	int iov_count,
1273  	int data)
1274  {
1275  	int rx_loop = 0, total_rx = 0;
1276  	struct msghdr msg;
1277  
1278  	if (!conn || !conn->sock || !conn->conn_ops)
1279  		return -1;
1280  
1281  	memset(&msg, 0, sizeof(struct msghdr));
1282  	iov_iter_kvec(&msg.msg_iter, ITER_DEST, iov, iov_count, data);
1283  
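	/*
	 * Loop until the full iov payload has been received; errors and a
	 * closed connection show up as rx_loop <= 0 and are passed back to
	 * the caller.
	 */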
1284  	while (msg_data_left(&msg)) {
1285  		rx_loop = sock_recvmsg(conn->sock, &msg, MSG_WAITALL);
1286  		if (rx_loop <= 0) {
1287  			pr_debug("rx_loop: %d total_rx: %d\n",
1288  				rx_loop, total_rx);
1289  			return rx_loop;
1290  		}
1291  		total_rx += rx_loop;
1292  		pr_debug("rx_loop: %d, total_rx: %d, data: %d\n",
1293  				rx_loop, total_rx, data);
1294  	}
1295  
1296  	return total_rx;
1297  }
1298  
1299  int tx_data(
1300  	struct iscsit_conn *conn,
1301  	struct kvec *iov,
1302  	int iov_count,
1303  	int data)
1304  {
1305  	struct msghdr msg;
1306  	int total_tx = 0;
1307  
1308  	if (!conn || !conn->sock || !conn->conn_ops)
1309  		return -1;
1310  
1311  	if (data <= 0) {
1312  		pr_err("Data length is: %d\n", data);
1313  		return -1;
1314  	}
1315  
1316  	memset(&msg, 0, sizeof(struct msghdr));
1317  
1318  	iov_iter_kvec(&msg.msg_iter, ITER_SOURCE, iov, iov_count, data);
1319  
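	/*
	 * Loop until everything has been handed to the socket; a return of
	 * <= 0 from sock_sendmsg() aborts and is passed back to the caller.
	 */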
1320  	while (msg_data_left(&msg)) {
1321  		int tx_loop = sock_sendmsg(conn->sock, &msg);
1322  		if (tx_loop <= 0) {
1323  			pr_debug("tx_loop: %d total_tx %d\n",
1324  				tx_loop, total_tx);
1325  			return tx_loop;
1326  		}
1327  		total_tx += tx_loop;
1328  		pr_debug("tx_loop: %d, total_tx: %d, data: %d\n",
1329  					tx_loop, total_tx, data);
1330  	}
1331  
1332  	return total_tx;
1333  }
1334  
1335  void iscsit_collect_login_stats(
1336  	struct iscsit_conn *conn,
1337  	u8 status_class,
1338  	u8 status_detail)
1339  {
1340  	struct iscsi_param *intrname = NULL;
1341  	struct iscsi_tiqn *tiqn;
1342  	struct iscsi_login_stats *ls;
1343  
1344  	tiqn = iscsit_snmp_get_tiqn(conn);
1345  	if (!tiqn)
1346  		return;
1347  
1348  	ls = &tiqn->login_stats;
1349  
1350  	spin_lock(&ls->lock);
1351  	if (status_class == ISCSI_STATUS_CLS_SUCCESS)
1352  		ls->accepts++;
1353  	else if (status_class == ISCSI_STATUS_CLS_REDIRECT) {
1354  		ls->redirects++;
1355  		ls->last_fail_type = ISCSI_LOGIN_FAIL_REDIRECT;
1356  	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
1357  		 (status_detail == ISCSI_LOGIN_STATUS_AUTH_FAILED)) {
1358  		ls->authenticate_fails++;
1359  		ls->last_fail_type =  ISCSI_LOGIN_FAIL_AUTHENTICATE;
1360  	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR)  &&
1361  		 (status_detail == ISCSI_LOGIN_STATUS_TGT_FORBIDDEN)) {
1362  		ls->authorize_fails++;
1363  		ls->last_fail_type = ISCSI_LOGIN_FAIL_AUTHORIZE;
1364  	} else if ((status_class == ISCSI_STATUS_CLS_INITIATOR_ERR) &&
1365  		 (status_detail == ISCSI_LOGIN_STATUS_INIT_ERR)) {
1366  		ls->negotiate_fails++;
1367  		ls->last_fail_type = ISCSI_LOGIN_FAIL_NEGOTIATE;
1368  	} else {
1369  		ls->other_fails++;
1370  		ls->last_fail_type = ISCSI_LOGIN_FAIL_OTHER;
1371  	}
1372  
1373  	/* Save initiator name, ip address and time, if it is a failed login */
1374  	if (status_class != ISCSI_STATUS_CLS_SUCCESS) {
1375  		if (conn->param_list)
1376  			intrname = iscsi_find_param_from_key(INITIATORNAME,
1377  							     conn->param_list);
1378  		strscpy(ls->last_intr_fail_name,
1379  		       (intrname ? intrname->value : "Unknown"),
1380  		       sizeof(ls->last_intr_fail_name));
1381  
1382  		ls->last_intr_fail_ip_family = conn->login_family;
1383  
1384  		ls->last_intr_fail_sockaddr = conn->login_sockaddr;
1385  		ls->last_fail_time = get_jiffies_64();
1386  	}
1387  
1388  	spin_unlock(&ls->lock);
1389  }
1390  
1391  struct iscsi_tiqn *iscsit_snmp_get_tiqn(struct iscsit_conn *conn)
1392  {
1393  	struct iscsi_portal_group *tpg;
1394  
1395  	if (!conn)
1396  		return NULL;
1397  
1398  	tpg = conn->tpg;
1399  	if (!tpg)
1400  		return NULL;
1401  
1402  	if (!tpg->tpg_tiqn)
1403  		return NULL;
1404  
1405  	return tpg->tpg_tiqn;
1406  }
1407  
1408  void iscsit_fill_cxn_timeout_err_stats(struct iscsit_session *sess)
1409  {
1410  	struct iscsi_portal_group *tpg = sess->tpg;
1411  	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;
1412  
1413  	if (!tiqn)
1414  		return;
1415  
1416  	spin_lock_bh(&tiqn->sess_err_stats.lock);
1417  	strscpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
1418  			sess->sess_ops->InitiatorName,
1419  			sizeof(tiqn->sess_err_stats.last_sess_fail_rem_name));
1420  	tiqn->sess_err_stats.last_sess_failure_type =
1421  			ISCSI_SESS_ERR_CXN_TIMEOUT;
1422  	tiqn->sess_err_stats.cxn_timeout_errors++;
1423  	atomic_long_inc(&sess->conn_timeout_errors);
1424  	spin_unlock_bh(&tiqn->sess_err_stats.lock);
1425  }
1426