1  // SPDX-License-Identifier: GPL-2.0-or-later
2  /*******************************************************************************
3   * Filename:  target_core_transport.c
4   *
5   * This file contains the Generic Target Engine Core.
6   *
7   * (c) Copyright 2002-2013 Datera, Inc.
8   *
9   * Nicholas A. Bellinger <nab@kernel.org>
10   *
11   ******************************************************************************/
12  
13  #include <linux/net.h>
14  #include <linux/delay.h>
15  #include <linux/string.h>
16  #include <linux/timer.h>
17  #include <linux/slab.h>
18  #include <linux/spinlock.h>
19  #include <linux/kthread.h>
20  #include <linux/in.h>
21  #include <linux/cdrom.h>
22  #include <linux/module.h>
23  #include <linux/ratelimit.h>
24  #include <linux/vmalloc.h>
25  #include <linux/unaligned.h>
26  #include <net/sock.h>
27  #include <net/tcp.h>
28  #include <scsi/scsi_proto.h>
29  #include <scsi/scsi_common.h>
30  
31  #include <target/target_core_base.h>
32  #include <target/target_core_backend.h>
33  #include <target/target_core_fabric.h>
34  
35  #include "target_core_internal.h"
36  #include "target_core_alua.h"
37  #include "target_core_pr.h"
38  #include "target_core_ua.h"
39  
40  #define CREATE_TRACE_POINTS
41  #include <trace/events/target.h>
42  
43  static struct workqueue_struct *target_completion_wq;
44  static struct workqueue_struct *target_submission_wq;
45  static struct kmem_cache *se_sess_cache;
46  struct kmem_cache *se_ua_cache;
47  struct kmem_cache *t10_pr_reg_cache;
48  struct kmem_cache *t10_alua_lu_gp_cache;
49  struct kmem_cache *t10_alua_lu_gp_mem_cache;
50  struct kmem_cache *t10_alua_tg_pt_gp_cache;
51  struct kmem_cache *t10_alua_lba_map_cache;
52  struct kmem_cache *t10_alua_lba_map_mem_cache;
53  
54  static void transport_complete_task_attr(struct se_cmd *cmd);
55  static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
56  static void transport_handle_queue_full(struct se_cmd *cmd,
57  		struct se_device *dev, int err, bool write_pending);
58  static void target_complete_ok_work(struct work_struct *work);
59  
60  int init_se_kmem_caches(void)
61  {
62  	se_sess_cache = kmem_cache_create("se_sess_cache",
63  			sizeof(struct se_session), __alignof__(struct se_session),
64  			0, NULL);
65  	if (!se_sess_cache) {
66  		pr_err("kmem_cache_create() for struct se_session"
67  				" failed\n");
68  		goto out;
69  	}
70  	se_ua_cache = kmem_cache_create("se_ua_cache",
71  			sizeof(struct se_ua), __alignof__(struct se_ua),
72  			0, NULL);
73  	if (!se_ua_cache) {
74  		pr_err("kmem_cache_create() for struct se_ua failed\n");
75  		goto out_free_sess_cache;
76  	}
77  	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
78  			sizeof(struct t10_pr_registration),
79  			__alignof__(struct t10_pr_registration), 0, NULL);
80  	if (!t10_pr_reg_cache) {
81  		pr_err("kmem_cache_create() for struct t10_pr_registration"
82  				" failed\n");
83  		goto out_free_ua_cache;
84  	}
85  	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
86  			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
87  			0, NULL);
88  	if (!t10_alua_lu_gp_cache) {
89  		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
90  				" failed\n");
91  		goto out_free_pr_reg_cache;
92  	}
93  	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
94  			sizeof(struct t10_alua_lu_gp_member),
95  			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
96  	if (!t10_alua_lu_gp_mem_cache) {
97  		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
98  				"cache failed\n");
99  		goto out_free_lu_gp_cache;
100  	}
101  	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
102  			sizeof(struct t10_alua_tg_pt_gp),
103  			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
104  	if (!t10_alua_tg_pt_gp_cache) {
105  		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
106  				"cache failed\n");
107  		goto out_free_lu_gp_mem_cache;
108  	}
109  	t10_alua_lba_map_cache = kmem_cache_create(
110  			"t10_alua_lba_map_cache",
111  			sizeof(struct t10_alua_lba_map),
112  			__alignof__(struct t10_alua_lba_map), 0, NULL);
113  	if (!t10_alua_lba_map_cache) {
114  		pr_err("kmem_cache_create() for t10_alua_lba_map_"
115  				"cache failed\n");
116  		goto out_free_tg_pt_gp_cache;
117  	}
118  	t10_alua_lba_map_mem_cache = kmem_cache_create(
119  			"t10_alua_lba_map_mem_cache",
120  			sizeof(struct t10_alua_lba_map_member),
121  			__alignof__(struct t10_alua_lba_map_member), 0, NULL);
122  	if (!t10_alua_lba_map_mem_cache) {
123  		pr_err("kmem_cache_create() for t10_alua_lba_map_mem_"
124  				"cache failed\n");
125  		goto out_free_lba_map_cache;
126  	}
127  
128  	target_completion_wq = alloc_workqueue("target_completion",
129  					       WQ_MEM_RECLAIM, 0);
130  	if (!target_completion_wq)
131  		goto out_free_lba_map_mem_cache;
132  
133  	target_submission_wq = alloc_workqueue("target_submission",
134  					       WQ_MEM_RECLAIM, 0);
135  	if (!target_submission_wq)
136  		goto out_free_completion_wq;
137  
138  	return 0;
139  
140  out_free_completion_wq:
141  	destroy_workqueue(target_completion_wq);
142  out_free_lba_map_mem_cache:
143  	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
144  out_free_lba_map_cache:
145  	kmem_cache_destroy(t10_alua_lba_map_cache);
146  out_free_tg_pt_gp_cache:
147  	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
148  out_free_lu_gp_mem_cache:
149  	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
150  out_free_lu_gp_cache:
151  	kmem_cache_destroy(t10_alua_lu_gp_cache);
152  out_free_pr_reg_cache:
153  	kmem_cache_destroy(t10_pr_reg_cache);
154  out_free_ua_cache:
155  	kmem_cache_destroy(se_ua_cache);
156  out_free_sess_cache:
157  	kmem_cache_destroy(se_sess_cache);
158  out:
159  	return -ENOMEM;
160  }
161  
162  void release_se_kmem_caches(void)
163  {
164  	destroy_workqueue(target_submission_wq);
165  	destroy_workqueue(target_completion_wq);
166  	kmem_cache_destroy(se_sess_cache);
167  	kmem_cache_destroy(se_ua_cache);
168  	kmem_cache_destroy(t10_pr_reg_cache);
169  	kmem_cache_destroy(t10_alua_lu_gp_cache);
170  	kmem_cache_destroy(t10_alua_lu_gp_mem_cache);
171  	kmem_cache_destroy(t10_alua_tg_pt_gp_cache);
172  	kmem_cache_destroy(t10_alua_lba_map_cache);
173  	kmem_cache_destroy(t10_alua_lba_map_mem_cache);
174  }
175  
176  /* This code ensures unique mib indexes are handed out. */
177  static DEFINE_SPINLOCK(scsi_mib_index_lock);
178  static u32 scsi_mib_index[SCSI_INDEX_TYPE_MAX];
179  
180  /*
181   * Allocate a new row index for the entry type specified
182   */
183  u32 scsi_get_new_index(scsi_index_t type)
184  {
185  	u32 new_index;
186  
187  	BUG_ON((type < 0) || (type >= SCSI_INDEX_TYPE_MAX));
188  
189  	spin_lock(&scsi_mib_index_lock);
190  	new_index = ++scsi_mib_index[type];
191  	spin_unlock(&scsi_mib_index_lock);
192  
193  	return new_index;
194  }
195  
196  void transport_subsystem_check_init(void)
197  {
198  	int ret;
199  	static int sub_api_initialized;
200  
201  	if (sub_api_initialized)
202  		return;
203  
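	/*
	 * IS_ENABLED() short-circuits the request_module() call below when a
	 * backend is not configured, so ret stays 0 and no error is logged.
	 */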
204  	ret = IS_ENABLED(CONFIG_TCM_IBLOCK) && request_module("target_core_iblock");
205  	if (ret != 0)
206  		pr_err("Unable to load target_core_iblock\n");
207  
208  	ret = IS_ENABLED(CONFIG_TCM_FILEIO) && request_module("target_core_file");
209  	if (ret != 0)
210  		pr_err("Unable to load target_core_file\n");
211  
212  	ret = IS_ENABLED(CONFIG_TCM_PSCSI) && request_module("target_core_pscsi");
213  	if (ret != 0)
214  		pr_err("Unable to load target_core_pscsi\n");
215  
216  	ret = IS_ENABLED(CONFIG_TCM_USER2) && request_module("target_core_user");
217  	if (ret != 0)
218  		pr_err("Unable to load target_core_user\n");
219  
220  	sub_api_initialized = 1;
221  }
222  
223  static void target_release_cmd_refcnt(struct percpu_ref *ref)
224  {
225  	struct target_cmd_counter *cmd_cnt  = container_of(ref,
226  							   typeof(*cmd_cnt),
227  							   refcnt);
228  	wake_up(&cmd_cnt->refcnt_wq);
229  }
230  
231  struct target_cmd_counter *target_alloc_cmd_counter(void)
232  {
233  	struct target_cmd_counter *cmd_cnt;
234  	int rc;
235  
236  	cmd_cnt = kzalloc(sizeof(*cmd_cnt), GFP_KERNEL);
237  	if (!cmd_cnt)
238  		return NULL;
239  
240  	init_completion(&cmd_cnt->stop_done);
241  	init_waitqueue_head(&cmd_cnt->refcnt_wq);
242  	atomic_set(&cmd_cnt->stopped, 0);
243  
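	/*
	 * percpu_ref_init() takes an initial reference.  It is normally
	 * dropped when the counter is stopped, or in target_free_cmd_counter()
	 * for drivers that never call target_stop_session().
	 */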
244  	rc = percpu_ref_init(&cmd_cnt->refcnt, target_release_cmd_refcnt, 0,
245  			     GFP_KERNEL);
246  	if (rc)
247  		goto free_cmd_cnt;
248  
249  	return cmd_cnt;
250  
251  free_cmd_cnt:
252  	kfree(cmd_cnt);
253  	return NULL;
254  }
255  EXPORT_SYMBOL_GPL(target_alloc_cmd_counter);
256  
257  void target_free_cmd_counter(struct target_cmd_counter *cmd_cnt)
258  {
259  	/*
260  	 * Drivers like loop do not call target_stop_session during session
261  	 * shutdown so we have to drop the ref taken at init time here.
262  	 */
263  	if (!atomic_read(&cmd_cnt->stopped))
264  		percpu_ref_put(&cmd_cnt->refcnt);
265  
266  	percpu_ref_exit(&cmd_cnt->refcnt);
267  	kfree(cmd_cnt);
268  }
269  EXPORT_SYMBOL_GPL(target_free_cmd_counter);
270  
271  /**
272   * transport_init_session - initialize a session object
273   * @se_sess: Session object pointer.
274   *
275   * The caller must have zero-initialized @se_sess before calling this function.
276   */
277  void transport_init_session(struct se_session *se_sess)
278  {
279  	INIT_LIST_HEAD(&se_sess->sess_list);
280  	INIT_LIST_HEAD(&se_sess->sess_acl_list);
281  	spin_lock_init(&se_sess->sess_cmd_lock);
282  }
283  EXPORT_SYMBOL(transport_init_session);
284  
285  /**
286   * transport_alloc_session - allocate a session object and initialize it
287   * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
288   */
289  struct se_session *transport_alloc_session(enum target_prot_op sup_prot_ops)
290  {
291  	struct se_session *se_sess;
292  
293  	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
294  	if (!se_sess) {
295  		pr_err("Unable to allocate struct se_session from"
296  				" se_sess_cache\n");
297  		return ERR_PTR(-ENOMEM);
298  	}
299  	transport_init_session(se_sess);
300  	se_sess->sup_prot_ops = sup_prot_ops;
301  
302  	return se_sess;
303  }
304  EXPORT_SYMBOL(transport_alloc_session);
305  
306  /**
307   * transport_alloc_session_tags - allocate target driver private data
308   * @se_sess:  Session pointer.
309   * @tag_num:  Maximum number of in-flight commands between initiator and target.
310   * @tag_size: Size in bytes of the private data a target driver associates with
311   *	      each command.
312   */
313  int transport_alloc_session_tags(struct se_session *se_sess,
314  			         unsigned int tag_num, unsigned int tag_size)
315  {
316  	int rc;
317  
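	/*
	 * One tag_size sized private area per tag; kvcalloc() falls back to
	 * vmalloc() when the map is too large for a kmalloc() allocation.
	 */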
318  	se_sess->sess_cmd_map = kvcalloc(tag_size, tag_num,
319  					 GFP_KERNEL | __GFP_RETRY_MAYFAIL);
320  	if (!se_sess->sess_cmd_map) {
321  		pr_err("Unable to allocate se_sess->sess_cmd_map\n");
322  		return -ENOMEM;
323  	}
324  
325  	rc = sbitmap_queue_init_node(&se_sess->sess_tag_pool, tag_num, -1,
326  			false, GFP_KERNEL, NUMA_NO_NODE);
327  	if (rc < 0) {
328  		pr_err("Unable to init se_sess->sess_tag_pool,"
329  			" tag_num: %u\n", tag_num);
330  		kvfree(se_sess->sess_cmd_map);
331  		se_sess->sess_cmd_map = NULL;
332  		return -ENOMEM;
333  	}
334  
335  	return 0;
336  }
337  EXPORT_SYMBOL(transport_alloc_session_tags);
338  
339  /**
340   * transport_init_session_tags - allocate a session and target driver private data
341   * @tag_num:  Maximum number of in-flight commands between initiator and target.
342   * @tag_size: Size in bytes of the private data a target driver associates with
343   *	      each command.
344   * @sup_prot_ops: bitmask that defines which T10-PI modes are supported.
345   */
346  static struct se_session *
347  transport_init_session_tags(unsigned int tag_num, unsigned int tag_size,
348  			    enum target_prot_op sup_prot_ops)
349  {
350  	struct se_session *se_sess;
351  	int rc;
352  
353  	if (tag_num != 0 && !tag_size) {
354  		pr_err("init_session_tags called with percpu-ida tag_num:"
355  		       " %u, but zero tag_size\n", tag_num);
356  		return ERR_PTR(-EINVAL);
357  	}
358  	if (!tag_num && tag_size) {
359  		pr_err("init_session_tags called with percpu-ida tag_size:"
360  		       " %u, but zero tag_num\n", tag_size);
361  		return ERR_PTR(-EINVAL);
362  	}
363  
364  	se_sess = transport_alloc_session(sup_prot_ops);
365  	if (IS_ERR(se_sess))
366  		return se_sess;
367  
368  	rc = transport_alloc_session_tags(se_sess, tag_num, tag_size);
369  	if (rc < 0) {
370  		transport_free_session(se_sess);
371  		return ERR_PTR(-ENOMEM);
372  	}
373  
374  	return se_sess;
375  }
376  
377  /*
378   * Called with spin_lock_irqsave(&struct se_portal_group->session_lock) held.
379   */
380  void __transport_register_session(
381  	struct se_portal_group *se_tpg,
382  	struct se_node_acl *se_nacl,
383  	struct se_session *se_sess,
384  	void *fabric_sess_ptr)
385  {
386  	const struct target_core_fabric_ops *tfo = se_tpg->se_tpg_tfo;
387  	unsigned char buf[PR_REG_ISID_LEN];
388  	unsigned long flags;
389  
390  	se_sess->se_tpg = se_tpg;
391  	se_sess->fabric_sess_ptr = fabric_sess_ptr;
392  	/*
393  	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session.
394  	 *
395  	 * Only set for struct se_session's that will actually be moving I/O.
396  	 * eg: *NOT* discovery sessions.
397  	 */
398  	if (se_nacl) {
399  		/*
400  		 *
401  		 * Determine if fabric allows for T10-PI feature bits exposed to
402  		 * initiators for device backends with !dev->dev_attrib.pi_prot_type.
403  		 *
404  		 * If so, then always save prot_type on a per se_node_acl node
405  		 * basis and re-instate the previous sess_prot_type to avoid
406  		 * disabling PI from below any previously initiator side
407  		 * registered LUNs.
408  		 */
409  		if (se_nacl->saved_prot_type)
410  			se_sess->sess_prot_type = se_nacl->saved_prot_type;
411  		else if (tfo->tpg_check_prot_fabric_only)
412  			se_sess->sess_prot_type = se_nacl->saved_prot_type =
413  					tfo->tpg_check_prot_fabric_only(se_tpg);
414  		/*
415  		 * If the fabric module supports an ISID based TransportID,
416  		 * save this value in binary from the fabric I_T Nexus now.
417  		 */
418  		if (se_tpg->se_tpg_tfo->sess_get_initiator_sid != NULL) {
419  			memset(&buf[0], 0, PR_REG_ISID_LEN);
420  			se_tpg->se_tpg_tfo->sess_get_initiator_sid(se_sess,
421  					&buf[0], PR_REG_ISID_LEN);
422  			se_sess->sess_bin_isid = get_unaligned_be64(&buf[0]);
423  		}
424  
425  		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
426  		/*
427  		 * The se_nacl->nacl_sess pointer will be set to the
428  		 * last active I_T Nexus for each struct se_node_acl.
429  		 */
430  		se_nacl->nacl_sess = se_sess;
431  
432  		list_add_tail(&se_sess->sess_acl_list,
433  			      &se_nacl->acl_sess_list);
434  		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
435  	}
436  	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
437  
438  	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
439  		se_tpg->se_tpg_tfo->fabric_name, se_sess->fabric_sess_ptr);
440  }
441  EXPORT_SYMBOL(__transport_register_session);
442  
443  void transport_register_session(
444  	struct se_portal_group *se_tpg,
445  	struct se_node_acl *se_nacl,
446  	struct se_session *se_sess,
447  	void *fabric_sess_ptr)
448  {
449  	unsigned long flags;
450  
451  	spin_lock_irqsave(&se_tpg->session_lock, flags);
452  	__transport_register_session(se_tpg, se_nacl, se_sess, fabric_sess_ptr);
453  	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
454  }
455  EXPORT_SYMBOL(transport_register_session);
456  
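/*
 * Typical fabric driver usage, shown only as a sketch: the tag count, private
 * data type and callback names below are hypothetical, not part of this API.
 *
 *	sess = target_setup_session(tpg, 128, sizeof(struct my_fabric_cmd),
 *				    TARGET_PROT_NORMAL, initiator_name,
 *				    my_private, my_session_callback);
 *	if (IS_ERR(sess))
 *		return PTR_ERR(sess);
 */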
457  struct se_session *
458  target_setup_session(struct se_portal_group *tpg,
459  		     unsigned int tag_num, unsigned int tag_size,
460  		     enum target_prot_op prot_op,
461  		     const char *initiatorname, void *private,
462  		     int (*callback)(struct se_portal_group *,
463  				     struct se_session *, void *))
464  {
465  	struct target_cmd_counter *cmd_cnt;
466  	struct se_session *sess;
467  	int rc;
468  
469  	cmd_cnt = target_alloc_cmd_counter();
470  	if (!cmd_cnt)
471  		return ERR_PTR(-ENOMEM);
472  	/*
473  	 * If the fabric driver is using percpu-ida based preallocation
474  	 * of I/O descriptor tags, go ahead and perform that setup now.
475  	 */
476  	if (tag_num != 0)
477  		sess = transport_init_session_tags(tag_num, tag_size, prot_op);
478  	else
479  		sess = transport_alloc_session(prot_op);
480  
481  	if (IS_ERR(sess)) {
482  		rc = PTR_ERR(sess);
483  		goto free_cnt;
484  	}
485  	sess->cmd_cnt = cmd_cnt;
486  
487  	sess->se_node_acl = core_tpg_check_initiator_node_acl(tpg,
488  					(unsigned char *)initiatorname);
489  	if (!sess->se_node_acl) {
490  		rc = -EACCES;
491  		goto free_sess;
492  	}
493  	/*
494  	 * Go ahead and perform any remaining fabric setup that is
495  	 * required before transport_register_session().
496  	 */
497  	if (callback != NULL) {
498  		rc = callback(tpg, sess, private);
499  		if (rc)
500  			goto free_sess;
501  	}
502  
503  	transport_register_session(tpg, sess->se_node_acl, sess, private);
504  	return sess;
505  
506  free_sess:
507  	transport_free_session(sess);
508  	return ERR_PTR(rc);
509  
510  free_cnt:
511  	target_free_cmd_counter(cmd_cnt);
512  	return ERR_PTR(rc);
513  }
514  EXPORT_SYMBOL(target_setup_session);
515  
516  ssize_t target_show_dynamic_sessions(struct se_portal_group *se_tpg, char *page)
517  {
518  	struct se_session *se_sess;
519  	ssize_t len = 0;
520  
521  	spin_lock_bh(&se_tpg->session_lock);
522  	list_for_each_entry(se_sess, &se_tpg->tpg_sess_list, sess_list) {
523  		if (!se_sess->se_node_acl)
524  			continue;
525  		if (!se_sess->se_node_acl->dynamic_node_acl)
526  			continue;
527  		if (strlen(se_sess->se_node_acl->initiatorname) + 1 + len > PAGE_SIZE)
528  			break;
529  
530  		len += snprintf(page + len, PAGE_SIZE - len, "%s\n",
531  				se_sess->se_node_acl->initiatorname);
532  		len += 1; /* Include NULL terminator */
533  	}
534  	spin_unlock_bh(&se_tpg->session_lock);
535  
536  	return len;
537  }
538  EXPORT_SYMBOL(target_show_dynamic_sessions);
539  
540  static void target_complete_nacl(struct kref *kref)
541  {
542  	struct se_node_acl *nacl = container_of(kref,
543  				struct se_node_acl, acl_kref);
544  	struct se_portal_group *se_tpg = nacl->se_tpg;
545  
546  	if (!nacl->dynamic_stop) {
547  		complete(&nacl->acl_free_comp);
548  		return;
549  	}
550  
551  	mutex_lock(&se_tpg->acl_node_mutex);
552  	list_del_init(&nacl->acl_list);
553  	mutex_unlock(&se_tpg->acl_node_mutex);
554  
555  	core_tpg_wait_for_nacl_pr_ref(nacl);
556  	core_free_device_list_for_node(nacl, se_tpg);
557  	kfree(nacl);
558  }
559  
560  void target_put_nacl(struct se_node_acl *nacl)
561  {
562  	kref_put(&nacl->acl_kref, target_complete_nacl);
563  }
564  EXPORT_SYMBOL(target_put_nacl);
565  
566  void transport_deregister_session_configfs(struct se_session *se_sess)
567  {
568  	struct se_node_acl *se_nacl;
569  	unsigned long flags;
570  	/*
571  	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
572  	 */
573  	se_nacl = se_sess->se_node_acl;
574  	if (se_nacl) {
575  		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
576  		if (!list_empty(&se_sess->sess_acl_list))
577  			list_del_init(&se_sess->sess_acl_list);
578  		/*
579  		 * If the session list is empty, then clear the pointer.
580  		 * Otherwise, set the struct se_session pointer from the tail
581  		 * element of the per struct se_node_acl active session list.
582  		 */
583  		if (list_empty(&se_nacl->acl_sess_list))
584  			se_nacl->nacl_sess = NULL;
585  		else {
586  			se_nacl->nacl_sess = container_of(
587  					se_nacl->acl_sess_list.prev,
588  					struct se_session, sess_acl_list);
589  		}
590  		spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
591  	}
592  }
593  EXPORT_SYMBOL(transport_deregister_session_configfs);
594  
595  void transport_free_session(struct se_session *se_sess)
596  {
597  	struct se_node_acl *se_nacl = se_sess->se_node_acl;
598  
599  	/*
600  	 * Drop the se_node_acl->acl_kref reference obtained from within
601  	 * core_tpg_get_initiator_node_acl().
602  	 */
603  	if (se_nacl) {
604  		struct se_portal_group *se_tpg = se_nacl->se_tpg;
605  		const struct target_core_fabric_ops *se_tfo = se_tpg->se_tpg_tfo;
606  		unsigned long flags;
607  
608  		se_sess->se_node_acl = NULL;
609  
610  		/*
611  		 * Also determine if we need to drop the extra ->cmd_kref if
612  		 * it had been previously dynamically generated, and
613  		 * the endpoint is not caching dynamic ACLs.
614  		 */
615  		mutex_lock(&se_tpg->acl_node_mutex);
616  		if (se_nacl->dynamic_node_acl &&
617  		    !se_tfo->tpg_check_demo_mode_cache(se_tpg)) {
618  			spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
619  			if (list_empty(&se_nacl->acl_sess_list))
620  				se_nacl->dynamic_stop = true;
621  			spin_unlock_irqrestore(&se_nacl->nacl_sess_lock, flags);
622  
623  			if (se_nacl->dynamic_stop)
624  				list_del_init(&se_nacl->acl_list);
625  		}
626  		mutex_unlock(&se_tpg->acl_node_mutex);
627  
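		/*
		 * For a stopped dynamic ACL drop its initial kref here, then
		 * drop the reference this session took on the ACL at setup.
		 */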
628  		if (se_nacl->dynamic_stop)
629  			target_put_nacl(se_nacl);
630  
631  		target_put_nacl(se_nacl);
632  	}
633  	if (se_sess->sess_cmd_map) {
634  		sbitmap_queue_free(&se_sess->sess_tag_pool);
635  		kvfree(se_sess->sess_cmd_map);
636  	}
637  	if (se_sess->cmd_cnt)
638  		target_free_cmd_counter(se_sess->cmd_cnt);
639  	kmem_cache_free(se_sess_cache, se_sess);
640  }
641  EXPORT_SYMBOL(transport_free_session);
642  
643  static int target_release_res(struct se_device *dev, void *data)
644  {
645  	struct se_session *sess = data;
646  
647  	if (dev->reservation_holder == sess)
648  		target_release_reservation(dev);
649  	return 0;
650  }
651  
652  void transport_deregister_session(struct se_session *se_sess)
653  {
654  	struct se_portal_group *se_tpg = se_sess->se_tpg;
655  	unsigned long flags;
656  
657  	if (!se_tpg) {
658  		transport_free_session(se_sess);
659  		return;
660  	}
661  
662  	spin_lock_irqsave(&se_tpg->session_lock, flags);
663  	list_del(&se_sess->sess_list);
664  	se_sess->se_tpg = NULL;
665  	se_sess->fabric_sess_ptr = NULL;
666  	spin_unlock_irqrestore(&se_tpg->session_lock, flags);
667  
668  	/*
669  	 * Since the session is being removed, release SPC-2
670  	 * reservations held by the session that is disappearing.
671  	 */
672  	target_for_each_device(target_release_res, se_sess);
673  
674  	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
675  		se_tpg->se_tpg_tfo->fabric_name);
676  	/*
677  	 * If last kref is dropping now for an explicit NodeACL, awake sleeping
678  	 * ->acl_free_comp caller to wakeup configfs se_node_acl->acl_group
679  	 * removal context from within transport_free_session() code.
680  	 *
681  	 * For dynamic ACL, target_put_nacl() uses target_complete_nacl()
682  	 * to release all remaining generate_node_acl=1 created ACL resources.
683  	 */
684  
685  	transport_free_session(se_sess);
686  }
687  EXPORT_SYMBOL(transport_deregister_session);
688  
689  void target_remove_session(struct se_session *se_sess)
690  {
691  	transport_deregister_session_configfs(se_sess);
692  	transport_deregister_session(se_sess);
693  }
694  EXPORT_SYMBOL(target_remove_session);
695  
696  static void target_remove_from_state_list(struct se_cmd *cmd)
697  {
698  	struct se_device *dev = cmd->se_dev;
699  	unsigned long flags;
700  
701  	if (!dev)
702  		return;
703  
704  	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
705  	if (cmd->state_active) {
706  		list_del(&cmd->state_list);
707  		cmd->state_active = false;
708  	}
709  	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
710  }
711  
712  static void target_remove_from_tmr_list(struct se_cmd *cmd)
713  {
714  	struct se_device *dev = NULL;
715  	unsigned long flags;
716  
717  	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
718  		dev = cmd->se_tmr_req->tmr_dev;
719  
720  	if (dev) {
721  		spin_lock_irqsave(&dev->se_tmr_lock, flags);
722  		if (cmd->se_tmr_req->tmr_dev)
723  			list_del_init(&cmd->se_tmr_req->tmr_list);
724  		spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
725  	}
726  }
727  /*
728   * This function is called by the target core after the target core has
729   * finished processing a SCSI command or SCSI TMF. Both the regular command
730   * processing code and the code for aborting commands can call this
731   * function. CMD_T_STOP is set if and only if another thread is waiting
732   * inside transport_wait_for_tasks() for t_transport_stop_comp.
733   */
734  static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
735  {
736  	unsigned long flags;
737  
738  	spin_lock_irqsave(&cmd->t_state_lock, flags);
739  	/*
740  	 * Determine if frontend context caller is requesting the stopping of
741  	 * this command for frontend exceptions.
742  	 */
743  	if (cmd->transport_state & CMD_T_STOP) {
744  		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
745  			__func__, __LINE__, cmd->tag);
746  
747  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
748  
749  		complete_all(&cmd->t_transport_stop_comp);
750  		return 1;
751  	}
752  	cmd->transport_state &= ~CMD_T_ACTIVE;
753  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
754  
755  	/*
756  	 * Some fabric modules like tcm_loop can release their internally
757  	 * allocated I/O reference and struct se_cmd now.
758  	 *
759  	 * Fabric modules are expected to return '1' here if the se_cmd being
760  	 * passed is released at this point, or zero if not being released.
761  	 */
762  	return cmd->se_tfo->check_stop_free(cmd);
763  }
764  
765  static void transport_lun_remove_cmd(struct se_cmd *cmd)
766  {
767  	struct se_lun *lun = cmd->se_lun;
768  
769  	if (!lun)
770  		return;
771  
772  	target_remove_from_state_list(cmd);
773  	target_remove_from_tmr_list(cmd);
774  
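	/*
	 * cmpxchg() ensures the LUN reference is dropped at most once, even
	 * if this path races with another release of the same command.
	 */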
775  	if (cmpxchg(&cmd->lun_ref_active, true, false))
776  		percpu_ref_put(&lun->lun_ref);
777  
778  	/*
779  	 * Clear struct se_cmd->se_lun before the handoff to FE.
780  	 */
781  	cmd->se_lun = NULL;
782  }
783  
784  static void target_complete_failure_work(struct work_struct *work)
785  {
786  	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
787  
788  	transport_generic_request_failure(cmd, cmd->sense_reason);
789  }
790  
791  /*
792   * Used when asking transport to copy Sense Data from the underlying
793   * Linux/SCSI struct scsi_cmnd
794   */
795  static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd)
796  {
797  	struct se_device *dev = cmd->se_dev;
798  
799  	WARN_ON(!cmd->se_lun);
800  
801  	if (!dev)
802  		return NULL;
803  
804  	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION)
805  		return NULL;
806  
807  	cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER;
808  
809  	pr_debug("HBA_[%u]_PLUG[%s]: Requesting sense for SAM STATUS: 0x%02x\n",
810  		dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status);
811  	return cmd->sense_buffer;
812  }
813  
814  void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
815  {
816  	unsigned char *cmd_sense_buf;
817  	unsigned long flags;
818  
819  	spin_lock_irqsave(&cmd->t_state_lock, flags);
820  	cmd_sense_buf = transport_get_sense_buffer(cmd);
821  	if (!cmd_sense_buf) {
822  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
823  		return;
824  	}
825  
826  	cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
827  	memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
828  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
829  }
830  EXPORT_SYMBOL(transport_copy_sense_to_cmd);
831  
832  static void target_handle_abort(struct se_cmd *cmd)
833  {
834  	bool tas = cmd->transport_state & CMD_T_TAS;
835  	bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF;
836  	int ret;
837  
838  	pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas);
839  
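	/*
	 * With TAS (task aborted status) in effect a response must still be
	 * sent: SAM_STAT_TASK_ABORTED for a regular command, or a rejected
	 * TMR response for a task management request.
	 */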
840  	if (tas) {
841  		if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
842  			cmd->scsi_status = SAM_STAT_TASK_ABORTED;
843  			pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x, ITT: 0x%08llx\n",
844  				 cmd->t_task_cdb[0], cmd->tag);
845  			trace_target_cmd_complete(cmd);
846  			ret = cmd->se_tfo->queue_status(cmd);
847  			if (ret) {
848  				transport_handle_queue_full(cmd, cmd->se_dev,
849  							    ret, false);
850  				return;
851  			}
852  		} else {
853  			cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED;
854  			cmd->se_tfo->queue_tm_rsp(cmd);
855  		}
856  	} else {
857  		/*
858  		 * Allow the fabric driver to unmap any resources before
859  		 * releasing the descriptor via TFO->release_cmd().
860  		 */
861  		cmd->se_tfo->aborted_task(cmd);
862  		if (ack_kref)
863  			WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0);
864  		/*
865  		 * To do: establish a unit attention condition on the I_T
866  		 * nexus associated with cmd. See also the paragraph "Aborting
867  		 * commands" in SAM.
868  		 */
869  	}
870  
871  	WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
872  
873  	transport_lun_remove_cmd(cmd);
874  
875  	transport_cmd_check_stop_to_fabric(cmd);
876  }
877  
878  static void target_abort_work(struct work_struct *work)
879  {
880  	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
881  
882  	target_handle_abort(cmd);
883  }
884  
885  static bool target_cmd_interrupted(struct se_cmd *cmd)
886  {
887  	int post_ret;
888  
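	/*
	 * An aborted command is finished via the abort work below; a stopped
	 * command only wakes the waiter in transport_wait_for_tasks().
	 */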
889  	if (cmd->transport_state & CMD_T_ABORTED) {
890  		if (cmd->transport_complete_callback)
891  			cmd->transport_complete_callback(cmd, false, &post_ret);
892  		INIT_WORK(&cmd->work, target_abort_work);
893  		queue_work(target_completion_wq, &cmd->work);
894  		return true;
895  	} else if (cmd->transport_state & CMD_T_STOP) {
896  		if (cmd->transport_complete_callback)
897  			cmd->transport_complete_callback(cmd, false, &post_ret);
898  		complete_all(&cmd->t_transport_stop_comp);
899  		return true;
900  	}
901  
902  	return false;
903  }
904  
905  /* May be called from interrupt context so must not sleep. */
906  void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status,
907  				    sense_reason_t sense_reason)
908  {
909  	struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn;
910  	int success, cpu;
911  	unsigned long flags;
912  
913  	if (target_cmd_interrupted(cmd))
914  		return;
915  
916  	cmd->scsi_status = scsi_status;
917  	cmd->sense_reason = sense_reason;
918  
919  	spin_lock_irqsave(&cmd->t_state_lock, flags);
920  	switch (cmd->scsi_status) {
921  	case SAM_STAT_CHECK_CONDITION:
922  		if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
923  			success = 1;
924  		else
925  			success = 0;
926  		break;
927  	default:
928  		success = 1;
929  		break;
930  	}
931  
932  	cmd->t_state = TRANSPORT_COMPLETE;
933  	cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
934  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
935  
936  	INIT_WORK(&cmd->work, success ? target_complete_ok_work :
937  		  target_complete_failure_work);
938  
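	/*
	 * Honor the configured completion affinity: queue on the submitting
	 * CPU by default, or on the CPU selected via cmd_compl_affinity.
	 */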
939  	if (!wwn || wwn->cmd_compl_affinity == SE_COMPL_AFFINITY_CPUID)
940  		cpu = cmd->cpuid;
941  	else
942  		cpu = wwn->cmd_compl_affinity;
943  
944  	queue_work_on(cpu, target_completion_wq, &cmd->work);
945  }
946  EXPORT_SYMBOL(target_complete_cmd_with_sense);
947  
948  void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status)
949  {
950  	target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ?
951  			      TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE :
952  			      TCM_NO_SENSE);
953  }
954  EXPORT_SYMBOL(target_complete_cmd);
955  
956  void target_set_cmd_data_length(struct se_cmd *cmd, int length)
957  {
958  	if (length < cmd->data_length) {
959  		if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
960  			cmd->residual_count += cmd->data_length - length;
961  		} else {
962  			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
963  			cmd->residual_count = cmd->data_length - length;
964  		}
965  
966  		cmd->data_length = length;
967  	}
968  }
969  EXPORT_SYMBOL(target_set_cmd_data_length);
970  
971  void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
972  {
973  	if (scsi_status == SAM_STAT_GOOD ||
974  	    cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) {
975  		target_set_cmd_data_length(cmd, length);
976  	}
977  
978  	target_complete_cmd(cmd, scsi_status);
979  }
980  EXPORT_SYMBOL(target_complete_cmd_with_length);
981  
982  static void target_add_to_state_list(struct se_cmd *cmd)
983  {
984  	struct se_device *dev = cmd->se_dev;
985  	unsigned long flags;
986  
987  	spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
988  	if (!cmd->state_active) {
989  		list_add_tail(&cmd->state_list,
990  			      &dev->queues[cmd->cpuid].state_list);
991  		cmd->state_active = true;
992  	}
993  	spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
994  }
995  
996  /*
997   * Handle QUEUE_FULL / -EAGAIN and -ENOMEM status
998   */
999  static void transport_write_pending_qf(struct se_cmd *cmd);
1000  static void transport_complete_qf(struct se_cmd *cmd);
1001  
1002  void target_qf_do_work(struct work_struct *work)
1003  {
1004  	struct se_device *dev = container_of(work, struct se_device,
1005  					qf_work_queue);
1006  	LIST_HEAD(qf_cmd_list);
1007  	struct se_cmd *cmd, *cmd_tmp;
1008  
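	/*
	 * Splice the whole queue-full list onto a local list so each command
	 * can be retried without holding qf_cmd_lock.
	 */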
1009  	spin_lock_irq(&dev->qf_cmd_lock);
1010  	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list);
1011  	spin_unlock_irq(&dev->qf_cmd_lock);
1012  
1013  	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
1014  		list_del(&cmd->se_qf_node);
1015  		atomic_dec_mb(&dev->dev_qf_count);
1016  
1017  		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
1018  			" context: %s\n", cmd->se_tfo->fabric_name, cmd,
1019  			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :
1020  			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
1021  			: "UNKNOWN");
1022  
1023  		if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP)
1024  			transport_write_pending_qf(cmd);
1025  		else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK ||
1026  			 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR)
1027  			transport_complete_qf(cmd);
1028  	}
1029  }
1030  
1031  unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd)
1032  {
1033  	switch (cmd->data_direction) {
1034  	case DMA_NONE:
1035  		return "NONE";
1036  	case DMA_FROM_DEVICE:
1037  		return "READ";
1038  	case DMA_TO_DEVICE:
1039  		return "WRITE";
1040  	case DMA_BIDIRECTIONAL:
1041  		return "BIDI";
1042  	default:
1043  		break;
1044  	}
1045  
1046  	return "UNKNOWN";
1047  }
1048  
1049  void transport_dump_dev_state(
1050  	struct se_device *dev,
1051  	char *b,
1052  	int *bl)
1053  {
1054  	*bl += sprintf(b + *bl, "Status: ");
1055  	if (dev->export_count)
1056  		*bl += sprintf(b + *bl, "ACTIVATED");
1057  	else
1058  		*bl += sprintf(b + *bl, "DEACTIVATED");
1059  
1060  	*bl += sprintf(b + *bl, "  Max Queue Depth: %d", dev->queue_depth);
1061  	*bl += sprintf(b + *bl, "  SectorSize: %u  HwMaxSectors: %u\n",
1062  		dev->dev_attrib.block_size,
1063  		dev->dev_attrib.hw_max_sectors);
1064  	*bl += sprintf(b + *bl, "        ");
1065  }
1066  
1067  void transport_dump_vpd_proto_id(
1068  	struct t10_vpd *vpd,
1069  	unsigned char *p_buf,
1070  	int p_buf_len)
1071  {
1072  	unsigned char buf[VPD_TMP_BUF_SIZE];
1073  	int len;
1074  
1075  	memset(buf, 0, VPD_TMP_BUF_SIZE);
1076  	len = sprintf(buf, "T10 VPD Protocol Identifier: ");
1077  
1078  	switch (vpd->protocol_identifier) {
1079  	case 0x00:
1080  		sprintf(buf+len, "Fibre Channel\n");
1081  		break;
1082  	case 0x10:
1083  		sprintf(buf+len, "Parallel SCSI\n");
1084  		break;
1085  	case 0x20:
1086  		sprintf(buf+len, "SSA\n");
1087  		break;
1088  	case 0x30:
1089  		sprintf(buf+len, "IEEE 1394\n");
1090  		break;
1091  	case 0x40:
1092  		sprintf(buf+len, "SCSI Remote Direct Memory Access"
1093  				" Protocol\n");
1094  		break;
1095  	case 0x50:
1096  		sprintf(buf+len, "Internet SCSI (iSCSI)\n");
1097  		break;
1098  	case 0x60:
1099  		sprintf(buf+len, "SAS Serial SCSI Protocol\n");
1100  		break;
1101  	case 0x70:
1102  		sprintf(buf+len, "Automation/Drive Interface Transport"
1103  				" Protocol\n");
1104  		break;
1105  	case 0x80:
1106  		sprintf(buf+len, "AT Attachment Interface ATA/ATAPI\n");
1107  		break;
1108  	default:
1109  		sprintf(buf+len, "Unknown 0x%02x\n",
1110  				vpd->protocol_identifier);
1111  		break;
1112  	}
1113  
1114  	if (p_buf)
1115  		strncpy(p_buf, buf, p_buf_len);
1116  	else
1117  		pr_debug("%s", buf);
1118  }
1119  
1120  void
1121  transport_set_vpd_proto_id(struct t10_vpd *vpd, unsigned char *page_83)
1122  {
1123  	/*
1124  	 * Check if the Protocol Identifier Valid (PIV) bit is set..
1125  	 *
1126  	 * from spc3r23.pdf section 7.5.1
1127  	 */
1128  	 if (page_83[1] & 0x80) {
1129  		vpd->protocol_identifier = (page_83[0] & 0xf0);
1130  		vpd->protocol_identifier_set = 1;
1131  		transport_dump_vpd_proto_id(vpd, NULL, 0);
1132  	}
1133  }
1134  EXPORT_SYMBOL(transport_set_vpd_proto_id);
1135  
1136  int transport_dump_vpd_assoc(
1137  	struct t10_vpd *vpd,
1138  	unsigned char *p_buf,
1139  	int p_buf_len)
1140  {
1141  	unsigned char buf[VPD_TMP_BUF_SIZE];
1142  	int ret = 0;
1143  	int len;
1144  
1145  	memset(buf, 0, VPD_TMP_BUF_SIZE);
1146  	len = sprintf(buf, "T10 VPD Identifier Association: ");
1147  
1148  	switch (vpd->association) {
1149  	case 0x00:
1150  		sprintf(buf+len, "addressed logical unit\n");
1151  		break;
1152  	case 0x10:
1153  		sprintf(buf+len, "target port\n");
1154  		break;
1155  	case 0x20:
1156  		sprintf(buf+len, "SCSI target device\n");
1157  		break;
1158  	default:
1159  		sprintf(buf+len, "Unknown 0x%02x\n", vpd->association);
1160  		ret = -EINVAL;
1161  		break;
1162  	}
1163  
1164  	if (p_buf)
1165  		strncpy(p_buf, buf, p_buf_len);
1166  	else
1167  		pr_debug("%s", buf);
1168  
1169  	return ret;
1170  }
1171  
1172  int transport_set_vpd_assoc(struct t10_vpd *vpd, unsigned char *page_83)
1173  {
1174  	/*
1175  	 * The VPD identification association..
1176  	 *
1177  	 * from spc3r23.pdf Section 7.6.3.1 Table 297
1178  	 */
1179  	vpd->association = (page_83[1] & 0x30);
1180  	return transport_dump_vpd_assoc(vpd, NULL, 0);
1181  }
1182  EXPORT_SYMBOL(transport_set_vpd_assoc);
1183  
1184  int transport_dump_vpd_ident_type(
1185  	struct t10_vpd *vpd,
1186  	unsigned char *p_buf,
1187  	int p_buf_len)
1188  {
1189  	unsigned char buf[VPD_TMP_BUF_SIZE];
1190  	int ret = 0;
1191  	int len;
1192  
1193  	memset(buf, 0, VPD_TMP_BUF_SIZE);
1194  	len = sprintf(buf, "T10 VPD Identifier Type: ");
1195  
1196  	switch (vpd->device_identifier_type) {
1197  	case 0x00:
1198  		sprintf(buf+len, "Vendor specific\n");
1199  		break;
1200  	case 0x01:
1201  		sprintf(buf+len, "T10 Vendor ID based\n");
1202  		break;
1203  	case 0x02:
1204  		sprintf(buf+len, "EUI-64 based\n");
1205  		break;
1206  	case 0x03:
1207  		sprintf(buf+len, "NAA\n");
1208  		break;
1209  	case 0x04:
1210  		sprintf(buf+len, "Relative target port identifier\n");
1211  		break;
1212  	case 0x08:
1213  		sprintf(buf+len, "SCSI name string\n");
1214  		break;
1215  	default:
1216  		sprintf(buf+len, "Unsupported: 0x%02x\n",
1217  				vpd->device_identifier_type);
1218  		ret = -EINVAL;
1219  		break;
1220  	}
1221  
1222  	if (p_buf) {
1223  		if (p_buf_len < strlen(buf)+1)
1224  			return -EINVAL;
1225  		strncpy(p_buf, buf, p_buf_len);
1226  	} else {
1227  		pr_debug("%s", buf);
1228  	}
1229  
1230  	return ret;
1231  }
1232  
1233  int transport_set_vpd_ident_type(struct t10_vpd *vpd, unsigned char *page_83)
1234  {
1235  	/*
1236  	 * The VPD identifier type..
1237  	 *
1238  	 * from spc3r23.pdf Section 7.6.3.1 Table 298
1239  	 */
1240  	vpd->device_identifier_type = (page_83[1] & 0x0f);
1241  	return transport_dump_vpd_ident_type(vpd, NULL, 0);
1242  }
1243  EXPORT_SYMBOL(transport_set_vpd_ident_type);
1244  
1245  int transport_dump_vpd_ident(
1246  	struct t10_vpd *vpd,
1247  	unsigned char *p_buf,
1248  	int p_buf_len)
1249  {
1250  	unsigned char buf[VPD_TMP_BUF_SIZE];
1251  	int ret = 0;
1252  
1253  	memset(buf, 0, VPD_TMP_BUF_SIZE);
1254  
1255  	switch (vpd->device_identifier_code_set) {
1256  	case 0x01: /* Binary */
1257  		snprintf(buf, sizeof(buf),
1258  			"T10 VPD Binary Device Identifier: %s\n",
1259  			&vpd->device_identifier[0]);
1260  		break;
1261  	case 0x02: /* ASCII */
1262  		snprintf(buf, sizeof(buf),
1263  			"T10 VPD ASCII Device Identifier: %s\n",
1264  			&vpd->device_identifier[0]);
1265  		break;
1266  	case 0x03: /* UTF-8 */
1267  		snprintf(buf, sizeof(buf),
1268  			"T10 VPD UTF-8 Device Identifier: %s\n",
1269  			&vpd->device_identifier[0]);
1270  		break;
1271  	default:
1272  		sprintf(buf, "T10 VPD Device Identifier encoding unsupported:"
1273  			" 0x%02x", vpd->device_identifier_code_set);
1274  		ret = -EINVAL;
1275  		break;
1276  	}
1277  
1278  	if (p_buf)
1279  		strncpy(p_buf, buf, p_buf_len);
1280  	else
1281  		pr_debug("%s", buf);
1282  
1283  	return ret;
1284  }
1285  
1286  int
1287  transport_set_vpd_ident(struct t10_vpd *vpd, unsigned char *page_83)
1288  {
1289  	static const char hex_str[] = "0123456789abcdef";
1290  	int j = 0, i = 4; /* offset to start of the identifier */
1291  
1292  	/*
1293  	 * The VPD Code Set (encoding)
1294  	 *
1295  	 * from spc3r23.pdf Section 7.6.3.1 Table 296
1296  	 */
1297  	vpd->device_identifier_code_set = (page_83[0] & 0x0f);
1298  	switch (vpd->device_identifier_code_set) {
1299  	case 0x01: /* Binary */
1300  		vpd->device_identifier[j++] =
1301  				hex_str[vpd->device_identifier_type];
1302  		while (i < (4 + page_83[3])) {
1303  			vpd->device_identifier[j++] =
1304  				hex_str[(page_83[i] & 0xf0) >> 4];
1305  			vpd->device_identifier[j++] =
1306  				hex_str[page_83[i] & 0x0f];
1307  			i++;
1308  		}
1309  		break;
1310  	case 0x02: /* ASCII */
1311  	case 0x03: /* UTF-8 */
1312  		while (i < (4 + page_83[3]))
1313  			vpd->device_identifier[j++] = page_83[i++];
1314  		break;
1315  	default:
1316  		break;
1317  	}
1318  
1319  	return transport_dump_vpd_ident(vpd, NULL, 0);
1320  }
1321  EXPORT_SYMBOL(transport_set_vpd_ident);
1322  
1323  static sense_reason_t
1324  target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev,
1325  			       unsigned int size)
1326  {
1327  	u32 mtl;
1328  
1329  	if (!cmd->se_tfo->max_data_sg_nents)
1330  		return TCM_NO_SENSE;
1331  	/*
1332  	 * Check if se_cmd->data_length exceeds the fabric enforced maximum
1333  	 * transfer length, i.e. max SGL entries per I/O descriptor times
1334  	 * PAGE_SIZE.  If so, set SCF_UNDERFLOW_BIT + residual_count and
1335  	 * reduce the original cmd->data_length to that maximum length.
1336  	 */
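	/* e.g. max_data_sg_nents = 32 with 4K pages gives an mtl of 128K. */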
1337  	mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE);
1338  	if (cmd->data_length > mtl) {
1339  		/*
1340  		 * If an existing CDB overflow is present, calculate new residual
1341  		 * based on CDB size minus fabric maximum transfer length.
1342  		 *
1343  		 * If an existing CDB underflow is present, calculate new residual
1344  		 * based on original cmd->data_length minus fabric maximum transfer
1345  		 * length.
1346  		 *
1347  		 * Otherwise, set the underflow residual based on cmd->data_length
1348  		 * minus fabric maximum transfer length.
1349  		 */
1350  		if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1351  			cmd->residual_count = (size - mtl);
1352  		} else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1353  			u32 orig_dl = size + cmd->residual_count;
1354  			cmd->residual_count = (orig_dl - mtl);
1355  		} else {
1356  			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1357  			cmd->residual_count = (cmd->data_length - mtl);
1358  		}
1359  		cmd->data_length = mtl;
1360  		/*
1361  		 * Reset sbc_check_prot() calculated protection payload
1362  		 * length based upon the new smaller MTL.
1363  		 */
1364  		if (cmd->prot_length) {
1365  			u32 sectors = (mtl / dev->dev_attrib.block_size);
1366  			cmd->prot_length = dev->prot_length * sectors;
1367  		}
1368  	}
1369  	return TCM_NO_SENSE;
1370  }
1371  
1372  /**
1373   * target_cmd_size_check - Check whether there will be a residual.
1374   * @cmd: SCSI command.
1375   * @size: Data buffer size derived from CDB. The data buffer size provided by
1376   *   the SCSI transport driver is available in @cmd->data_length.
1377   *
1378   * Compare the data buffer size from the CDB with the data buffer limit from the transport
1379   * header. Set @cmd->residual_count and SCF_OVERFLOW_BIT or SCF_UNDERFLOW_BIT if necessary.
1380   *
1381   * Note: target drivers set @cmd->data_length by calling __target_init_cmd().
1382   *
1383   * Return: TCM_NO_SENSE
1384   */
1385  sense_reason_t
1386  target_cmd_size_check(struct se_cmd *cmd, unsigned int size)
1387  {
1388  	struct se_device *dev = cmd->se_dev;
1389  
1390  	if (cmd->unknown_data_length) {
1391  		cmd->data_length = size;
1392  	} else if (size != cmd->data_length) {
1393  		pr_warn_ratelimited("TARGET_CORE[%s]: Expected Transfer Length:"
1394  			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
1395  			" 0x%02x\n", cmd->se_tfo->fabric_name,
1396  				cmd->data_length, size, cmd->t_task_cdb[0]);
1397  		/*
1398  		 * For READ command for the overflow case keep the existing
1399  		 * fabric provided ->data_length. Otherwise for the underflow
1400  		 * case, reset ->data_length to the smaller SCSI expected data
1401  		 * transfer length.
1402  		 */
1403  		if (size > cmd->data_length) {
1404  			cmd->se_cmd_flags |= SCF_OVERFLOW_BIT;
1405  			cmd->residual_count = (size - cmd->data_length);
1406  		} else {
1407  			cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
1408  			cmd->residual_count = (cmd->data_length - size);
1409  			/*
1410  			 * Do not truncate ->data_length for WRITE command to
1411  			 * dump all payload
1412  			 */
1413  			if (cmd->data_direction == DMA_FROM_DEVICE) {
1414  				cmd->data_length = size;
1415  			}
1416  		}
1417  
1418  		if (cmd->data_direction == DMA_TO_DEVICE) {
1419  			if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
1420  				pr_err_ratelimited("Rejecting underflow/overflow"
1421  						   " for WRITE data CDB\n");
1422  				return TCM_INVALID_FIELD_IN_COMMAND_IU;
1423  			}
1424  			/*
1425  			 * Some fabric drivers like iscsi-target still expect to
1426  			 * always reject overflow writes.  Reject this case until
1427  			 * full fabric driver level support for overflow writes
1428  			 * is introduced tree-wide.
1429  			 */
1430  			if (size > cmd->data_length) {
1431  				pr_err_ratelimited("Rejecting overflow for"
1432  						   " WRITE control CDB\n");
1433  				return TCM_INVALID_CDB_FIELD;
1434  			}
1435  		}
1436  	}
1437  
1438  	return target_check_max_data_sg_nents(cmd, dev, size);
1439  
1440  }
1441  
1442  /*
1443   * Used by fabric modules containing a local struct se_cmd within their
1444   * fabric dependent per I/O descriptor.
1445   *
1446   * Preserves the value of @cmd->tag.
1447   */
1448  void __target_init_cmd(struct se_cmd *cmd,
1449  		       const struct target_core_fabric_ops *tfo,
1450  		       struct se_session *se_sess, u32 data_length,
1451  		       int data_direction, int task_attr,
1452  		       unsigned char *sense_buffer, u64 unpacked_lun,
1453  		       struct target_cmd_counter *cmd_cnt)
1454  {
1455  	INIT_LIST_HEAD(&cmd->se_delayed_node);
1456  	INIT_LIST_HEAD(&cmd->se_qf_node);
1457  	INIT_LIST_HEAD(&cmd->state_list);
1458  	init_completion(&cmd->t_transport_stop_comp);
1459  	cmd->free_compl = NULL;
1460  	cmd->abrt_compl = NULL;
1461  	spin_lock_init(&cmd->t_state_lock);
1462  	INIT_WORK(&cmd->work, NULL);
1463  	kref_init(&cmd->cmd_kref);
1464  
1465  	cmd->t_task_cdb = &cmd->__t_task_cdb[0];
1466  	cmd->se_tfo = tfo;
1467  	cmd->se_sess = se_sess;
1468  	cmd->data_length = data_length;
1469  	cmd->data_direction = data_direction;
1470  	cmd->sam_task_attr = task_attr;
1471  	cmd->sense_buffer = sense_buffer;
1472  	cmd->orig_fe_lun = unpacked_lun;
1473  	cmd->cmd_cnt = cmd_cnt;
1474  
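	/*
	 * Unless the fabric pinned a CPU via SCF_USE_CPUID, record the
	 * submitting CPU for per-CPU state tracking and completion affinity.
	 */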
1475  	if (!(cmd->se_cmd_flags & SCF_USE_CPUID))
1476  		cmd->cpuid = raw_smp_processor_id();
1477  
1478  	cmd->state_active = false;
1479  }
1480  EXPORT_SYMBOL(__target_init_cmd);
1481  
1482  static sense_reason_t
1483  transport_check_alloc_task_attr(struct se_cmd *cmd)
1484  {
1485  	struct se_device *dev = cmd->se_dev;
1486  
1487  	/*
1488  	 * Check if SAM Task Attribute emulation is enabled for this
1489  	 * struct se_device storage object
1490  	 */
1491  	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1492  		return 0;
1493  
1494  	if (cmd->sam_task_attr == TCM_ACA_TAG) {
1495  		pr_debug("SAM Task Attribute ACA"
1496  			" emulation is not supported\n");
1497  		return TCM_INVALID_CDB_FIELD;
1498  	}
1499  
1500  	return 0;
1501  }
1502  
1503  sense_reason_t
1504  target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp)
1505  {
1506  	sense_reason_t ret;
1507  
1508  	/*
1509  	 * Ensure that the received CDB is less than the max (252 + 8) bytes
1510  	 * for VARIABLE_LENGTH_CMD
1511  	 */
1512  	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
1513  		pr_err("Received SCSI CDB with command_size: %d that"
1514  			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
1515  			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
1516  		ret = TCM_INVALID_CDB_FIELD;
1517  		goto err;
1518  	}
1519  	/*
1520  	 * If the received CDB is larger than TCM_MAX_COMMAND_SIZE,
1521  	 * allocate the additional extended CDB buffer now..  Otherwise
1522  	 * setup the pointer from __t_task_cdb to t_task_cdb.
1523  	 */
1524  	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
1525  		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp);
1526  		if (!cmd->t_task_cdb) {
1527  			pr_err("Unable to allocate cmd->t_task_cdb"
1528  				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
1529  				scsi_command_size(cdb),
1530  				(unsigned long)sizeof(cmd->__t_task_cdb));
1531  			ret = TCM_OUT_OF_RESOURCES;
1532  			goto err;
1533  		}
1534  	}
1535  	/*
1536  	 * Copy the original CDB into cmd->
1537  	 */
1538  	memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb));
1539  
1540  	trace_target_sequencer_start(cmd);
1541  	return 0;
1542  
1543  err:
1544  	/*
1545  	 * Copy the CDB here to allow trace_target_cmd_complete() to
1546  	 * print the cdb to the trace buffers.
1547  	 */
1548  	memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb),
1549  					 (unsigned int)TCM_MAX_COMMAND_SIZE));
1550  	return ret;
1551  }
1552  EXPORT_SYMBOL(target_cmd_init_cdb);
1553  
1554  sense_reason_t
1555  target_cmd_parse_cdb(struct se_cmd *cmd)
1556  {
1557  	struct se_device *dev = cmd->se_dev;
1558  	sense_reason_t ret;
1559  
1560  	ret = dev->transport->parse_cdb(cmd);
1561  	if (ret == TCM_UNSUPPORTED_SCSI_OPCODE)
1562  		pr_debug_ratelimited("%s/%s: Unsupported SCSI Opcode 0x%02x, sending CHECK_CONDITION.\n",
1563  				     cmd->se_tfo->fabric_name,
1564  				     cmd->se_sess->se_node_acl->initiatorname,
1565  				     cmd->t_task_cdb[0]);
1566  	if (ret)
1567  		return ret;
1568  
1569  	ret = transport_check_alloc_task_attr(cmd);
1570  	if (ret)
1571  		return ret;
1572  
1573  	cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE;
1574  	atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus);
1575  	return 0;
1576  }
1577  EXPORT_SYMBOL(target_cmd_parse_cdb);
1578  
1579  static int __target_submit(struct se_cmd *cmd)
1580  {
1581  	sense_reason_t ret;
1582  
1583  	might_sleep();
1584  
1585  	/*
1586  	 * Check if we need to delay processing because of ALUA
1587  	 * Active/NonOptimized primary access state..
1588  	 */
1589  	core_alua_check_nonop_delay(cmd);
1590  
1591  	if (cmd->t_data_nents != 0) {
1592  		/*
1593  		 * This is primarily a hack for udev and tcm loop which sends
1594  		 * INQUIRYs with a single page and expects the data to be
1595  		 * cleared.
1596  		 */
1597  		if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) &&
1598  		    cmd->data_direction == DMA_FROM_DEVICE) {
1599  			struct scatterlist *sgl = cmd->t_data_sg;
1600  			unsigned char *buf = NULL;
1601  
1602  			BUG_ON(!sgl);
1603  
1604  			buf = kmap_local_page(sg_page(sgl));
1605  			if (buf) {
1606  				memset(buf + sgl->offset, 0, sgl->length);
1607  				kunmap_local(buf);
1608  			}
1609  		}
1610  	}
1611  
1612  	if (!cmd->se_lun) {
1613  		dump_stack();
1614  		pr_err("cmd->se_lun is NULL\n");
1615  		return -EINVAL;
1616  	}
1617  
1618  	/*
1619  	 * Set TRANSPORT_NEW_CMD state and CMD_T_ACTIVE to ensure that
1620  	 * outstanding descriptors are handled correctly during shutdown via
1621  	 * transport_wait_for_tasks()
1622  	 *
1623  	 * Also, we don't take cmd->t_state_lock here as we only expect
1624  	 * this to be called for initial descriptor submission.
1625  	 */
1626  	cmd->t_state = TRANSPORT_NEW_CMD;
1627  	cmd->transport_state |= CMD_T_ACTIVE;
1628  
1629  	/*
1630  	 * transport_generic_new_cmd() is already handling QUEUE_FULL,
1631  	 * so follow TRANSPORT_NEW_CMD processing thread context usage
1632  	 * and call transport_generic_request_failure() if necessary..
1633  	 */
1634  	ret = transport_generic_new_cmd(cmd);
1635  	if (ret)
1636  		transport_generic_request_failure(cmd, ret);
1637  	return 0;
1638  }
1639  
1640  sense_reason_t
1641  transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
1642  		u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
1643  {
1644  	if (!sgl || !sgl_count)
1645  		return 0;
1646  
1647  	/*
1648  	 * Reject SCSI data overflow with map_mem_to_cmd() as incoming
1649  	 * scatterlists already have been set to follow what the fabric
1650  	 * passes for the original expected data transfer length.
1651  	 */
1652  	if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1653  		pr_warn("Rejecting SCSI DATA overflow for fabric using"
1654  			" SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC\n");
1655  		return TCM_INVALID_CDB_FIELD;
1656  	}
1657  
1658  	cmd->t_data_sg = sgl;
1659  	cmd->t_data_nents = sgl_count;
1660  	cmd->t_bidi_data_sg = sgl_bidi;
1661  	cmd->t_bidi_data_nents = sgl_bidi_count;
1662  
1663  	cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
1664  	return 0;
1665  }
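/*
 * Illustrative only, not part of this file: fabric drivers do not call the
 * helper above directly.  A hypothetical driver that owns pre-mapped data
 * buffers reaches it by passing them to target_submit_prep(), e.g.:
 *
 *	rc = target_submit_prep(se_cmd, cdb, my_sgl, my_sgl_count,
 *				NULL, 0, NULL, 0, GFP_KERNEL);
 *
 * where my_sgl/my_sgl_count name the driver's own scatterlist and count.
 */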
1666  
1667  /**
1668   * target_init_cmd - initialize se_cmd
1669   * @se_cmd: command descriptor to init
1670   * @se_sess: associated se_sess for endpoint
1671   * @sense: pointer to SCSI sense buffer
1672   * @unpacked_lun: unpacked LUN to reference for struct se_lun
1673   * @data_length: fabric expected data transfer length
1674   * @task_attr: SAM task attribute
1675   * @data_dir: DMA data direction
1676   * @flags: flags for command submission from target_sc_flags_table
1677   *
1678   * Task tags are supported if the caller has set @se_cmd->tag.
1679   *
1680   * Returns:
1681   *	- less than zero to signal active I/O shutdown failure.
1682   *	- zero on success.
1683   *
1684   * If the fabric driver calls target_stop_session, then it must check the
1685   * return code and handle failures. This will never fail for other drivers,
1686   * and the return code can be ignored.
1687   */
1688  int target_init_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1689  		    unsigned char *sense, u64 unpacked_lun,
1690  		    u32 data_length, int task_attr, int data_dir, int flags)
1691  {
1692  	struct se_portal_group *se_tpg;
1693  
1694  	se_tpg = se_sess->se_tpg;
1695  	BUG_ON(!se_tpg);
1696  	BUG_ON(se_cmd->se_tfo || se_cmd->se_sess);
1697  
1698  	if (flags & TARGET_SCF_USE_CPUID)
1699  		se_cmd->se_cmd_flags |= SCF_USE_CPUID;
1700  	/*
1701  	 * Signal bidirectional data payloads to target-core
1702  	 */
1703  	if (flags & TARGET_SCF_BIDI_OP)
1704  		se_cmd->se_cmd_flags |= SCF_BIDI;
1705  
1706  	if (flags & TARGET_SCF_UNKNOWN_SIZE)
1707  		se_cmd->unknown_data_length = 1;
1708  	/*
1709  	 * Initialize se_cmd for target operation.  From this point
1710  	 * exceptions are handled by sending exception status via
1711  	 * target_core_fabric_ops->queue_status() callback
1712  	 */
1713  	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess, data_length,
1714  			  data_dir, task_attr, sense, unpacked_lun,
1715  			  se_sess->cmd_cnt);
1716  
1717  	/*
1718  	 * Obtain struct se_cmd->cmd_kref reference. A second kref_get here is
1719  	 * necessary for fabrics using TARGET_SCF_ACK_KREF that expect a second
1720  	 * kref_put() to happen during fabric packet acknowledgement.
1721  	 */
1722  	return target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
1723  }
1724  EXPORT_SYMBOL_GPL(target_init_cmd);
1725  
1726  /**
1727   * target_submit_prep - prepare cmd for submission
1728   * @se_cmd: command descriptor to prep
1729   * @cdb: pointer to SCSI CDB
1730   * @sgl: struct scatterlist memory for unidirectional mapping
1731   * @sgl_count: scatterlist count for unidirectional mapping
1732   * @sgl_bidi: struct scatterlist memory for bidirectional READ mapping
1733   * @sgl_bidi_count: scatterlist count for bidirectional READ mapping
1734   * @sgl_prot: struct scatterlist memory protection information
1735   * @sgl_prot_count: scatterlist count for protection information
1736   * @gfp: gfp allocation type
1737   *
1738   * Returns:
1739   *	- less than zero to signal failure.
1740   *	- zero on success.
1741   *
1742   * If failure is returned, LIO will use the caller's queue_status to
1743   * complete the cmd.
1744   */
1745  int target_submit_prep(struct se_cmd *se_cmd, unsigned char *cdb,
1746  		       struct scatterlist *sgl, u32 sgl_count,
1747  		       struct scatterlist *sgl_bidi, u32 sgl_bidi_count,
1748  		       struct scatterlist *sgl_prot, u32 sgl_prot_count,
1749  		       gfp_t gfp)
1750  {
1751  	sense_reason_t rc;
1752  
1753  	rc = target_cmd_init_cdb(se_cmd, cdb, gfp);
1754  	if (rc)
1755  		goto send_cc_direct;
1756  
1757  	/*
1758  	 * Locate se_lun pointer and attach it to struct se_cmd
1759  	 */
1760  	rc = transport_lookup_cmd_lun(se_cmd);
1761  	if (rc)
1762  		goto send_cc_direct;
1763  
1764  	rc = target_cmd_parse_cdb(se_cmd);
1765  	if (rc != 0)
1766  		goto generic_fail;
1767  
1768  	/*
1769  	 * Save pointers for SGLs containing protection information,
1770  	 * if present.
1771  	 */
1772  	if (sgl_prot_count) {
1773  		se_cmd->t_prot_sg = sgl_prot;
1774  		se_cmd->t_prot_nents = sgl_prot_count;
1775  		se_cmd->se_cmd_flags |= SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC;
1776  	}
1777  
1778  	/*
1779  	 * When a non zero sgl_count has been passed perform SGL passthrough
1780  	 * mapping for pre-allocated fabric memory instead of having target
1781  	 * core perform an internal SGL allocation..
1782  	 */
1783  	if (sgl_count != 0) {
1784  		BUG_ON(!sgl);
1785  
1786  		rc = transport_generic_map_mem_to_cmd(se_cmd, sgl, sgl_count,
1787  				sgl_bidi, sgl_bidi_count);
1788  		if (rc != 0)
1789  			goto generic_fail;
1790  	}
1791  
1792  	return 0;
1793  
1794  send_cc_direct:
1795  	transport_send_check_condition_and_sense(se_cmd, rc, 0);
1796  	target_put_sess_cmd(se_cmd);
1797  	return -EIO;
1798  
1799  generic_fail:
1800  	transport_generic_request_failure(se_cmd, rc);
1801  	return -EIO;
1802  }
1803  EXPORT_SYMBOL_GPL(target_submit_prep);
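/*
 * Illustrative only, not part of this file: a hypothetical fabric receive
 * path would normally use the split interface above as init -> prep ->
 * submit, checking the return code of each step.  se_cmd, se_sess,
 * sense_buf, unpacked_lun, data_length and cdb are assumed to come from the
 * fabric's own request context:
 *
 *	if (target_init_cmd(se_cmd, se_sess, sense_buf, unpacked_lun,
 *			    data_length, TCM_SIMPLE_TAG, DMA_FROM_DEVICE,
 *			    TARGET_SCF_ACK_KREF))
 *		return;		(session is being stopped)
 *
 *	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
 *			       GFP_KERNEL))
 *		return;		(LIO has already queued CHECK_CONDITION)
 *
 *	target_submit(se_cmd);
 */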
1804  
1805  /**
1806   * target_submit_cmd - lookup unpacked lun and submit uninitialized se_cmd
1807   *
1808   * @se_cmd: command descriptor to submit
1809   * @se_sess: associated se_sess for endpoint
1810   * @cdb: pointer to SCSI CDB
1811   * @sense: pointer to SCSI sense buffer
1812   * @unpacked_lun: unpacked LUN to reference for struct se_lun
1813   * @data_length: fabric expected data transfer length
1814   * @task_attr: SAM task attribute
1815   * @data_dir: DMA data direction
1816   * @flags: flags for command submission from target_sc_flags_table
1817   *
1818   * Task tags are supported if the caller has set @se_cmd->tag.
1819   *
1820   * This may only be called from process context, and also currently
1821   * assumes internal allocation of fabric payload buffer by target-core.
1822   *
1823   * It also assumes internal target core SGL memory allocation.
1824   *
1825   * This function must only be used by drivers that do their own
1826   * sync during shutdown and does not use target_stop_session. If there
1827   * is a failure this function will call into the fabric driver's
1828   * queue_status with a CHECK_CONDITION.
1829   */
1830  void target_submit_cmd(struct se_cmd *se_cmd, struct se_session *se_sess,
1831  		unsigned char *cdb, unsigned char *sense, u64 unpacked_lun,
1832  		u32 data_length, int task_attr, int data_dir, int flags)
1833  {
1834  	int rc;
1835  
1836  	rc = target_init_cmd(se_cmd, se_sess, sense, unpacked_lun, data_length,
1837  			     task_attr, data_dir, flags);
1838  	WARN(rc, "Invalid target_submit_cmd use. Driver must not use target_stop_session or call target_init_cmd directly.\n");
1839  	if (rc)
1840  		return;
1841  
1842  	if (target_submit_prep(se_cmd, cdb, NULL, 0, NULL, 0, NULL, 0,
1843  			       GFP_KERNEL))
1844  		return;
1845  
1846  	target_submit(se_cmd);
1847  }
1848  EXPORT_SYMBOL(target_submit_cmd);
1849  
1850  
1851  static struct se_dev_plug *target_plug_device(struct se_device *se_dev)
1852  {
1853  	struct se_dev_plug *se_plug;
1854  
1855  	if (!se_dev->transport->plug_device)
1856  		return NULL;
1857  
1858  	se_plug = se_dev->transport->plug_device(se_dev);
1859  	if (!se_plug)
1860  		return NULL;
1861  
1862  	se_plug->se_dev = se_dev;
1863  	/*
1864  	 * We have a ref to the lun at this point, but the cmds could
1865  	 * complete before we unplug, so grab a ref to the se_device so we
1866  	 * can call back into the backend.
1867  	 */
1868  	config_group_get(&se_dev->dev_group);
1869  	return se_plug;
1870  }
1871  
1872  static void target_unplug_device(struct se_dev_plug *se_plug)
1873  {
1874  	struct se_device *se_dev = se_plug->se_dev;
1875  
1876  	se_dev->transport->unplug_device(se_plug);
1877  	config_group_put(&se_dev->dev_group);
1878  }
1879  
1880  void target_queued_submit_work(struct work_struct *work)
1881  {
1882  	struct se_cmd_queue *sq = container_of(work, struct se_cmd_queue, work);
1883  	struct se_cmd *se_cmd, *next_cmd;
1884  	struct se_dev_plug *se_plug = NULL;
1885  	struct se_device *se_dev = NULL;
1886  	struct llist_node *cmd_list;
1887  
1888  	cmd_list = llist_del_all(&sq->cmd_list);
1889  	if (!cmd_list)
1890  		/* Previous call took what we were queued to submit */
1891  		return;
1892  
1893  	cmd_list = llist_reverse_order(cmd_list);
1894  	llist_for_each_entry_safe(se_cmd, next_cmd, cmd_list, se_cmd_list) {
1895  		if (!se_dev) {
1896  			se_dev = se_cmd->se_dev;
1897  			se_plug = target_plug_device(se_dev);
1898  		}
1899  
1900  		__target_submit(se_cmd);
1901  	}
1902  
1903  	if (se_plug)
1904  		target_unplug_device(se_plug);
1905  }
1906  
1907  /**
1908   * target_queue_submission - queue the cmd to run on the LIO workqueue
1909   * @se_cmd: command descriptor to submit
1910   */
1911  static void target_queue_submission(struct se_cmd *se_cmd)
1912  {
1913  	struct se_device *se_dev = se_cmd->se_dev;
1914  	int cpu = se_cmd->cpuid;
1915  	struct se_cmd_queue *sq;
1916  
1917  	sq = &se_dev->queues[cpu].sq;
1918  	llist_add(&se_cmd->se_cmd_list, &sq->cmd_list);
1919  	queue_work_on(cpu, target_submission_wq, &sq->work);
1920  }
1921  
1922  /**
1923   * target_submit - perform final initialization and submit cmd to LIO core
1924   * @se_cmd: command descriptor to submit
1925   *
1926   * target_submit_prep or something similar must have been called on the cmd,
1927   * and this must be called from process context.
1928   */
1929  int target_submit(struct se_cmd *se_cmd)
1930  {
1931  	const struct target_core_fabric_ops *tfo = se_cmd->se_sess->se_tpg->se_tpg_tfo;
1932  	struct se_dev_attrib *da = &se_cmd->se_dev->dev_attrib;
1933  	u8 submit_type;
1934  
1935  	if (da->submit_type == TARGET_FABRIC_DEFAULT_SUBMIT)
1936  		submit_type = tfo->default_submit_type;
1937  	else if (da->submit_type == TARGET_DIRECT_SUBMIT &&
1938  		 tfo->direct_submit_supp)
1939  		submit_type = TARGET_DIRECT_SUBMIT;
1940  	else
1941  		submit_type = TARGET_QUEUE_SUBMIT;
1942  
1943  	if (submit_type == TARGET_DIRECT_SUBMIT)
1944  		return __target_submit(se_cmd);
1945  
1946  	target_queue_submission(se_cmd);
1947  	return 0;
1948  }
1949  EXPORT_SYMBOL_GPL(target_submit);
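/*
 * Illustrative only, hypothetical fabric module (not part of this file): the
 * submit-type selection above is driven by fields the fabric advertises in
 * its target_core_fabric_ops, for example:
 *
 *	static const struct target_core_fabric_ops my_fabric_ops = {
 *		.fabric_name		= "my_fabric",
 *		.default_submit_type	= TARGET_QUEUE_SUBMIT,
 *		.direct_submit_supp	= 1,
 *	};
 *
 * With direct_submit_supp set, the device attribute can select
 * TARGET_DIRECT_SUBMIT; otherwise commands are queued to the per-CPU LIO
 * submission workqueue via target_queue_submission().
 */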
1950  
1951  static void target_complete_tmr_failure(struct work_struct *work)
1952  {
1953  	struct se_cmd *se_cmd = container_of(work, struct se_cmd, work);
1954  
1955  	se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
1956  	se_cmd->se_tfo->queue_tm_rsp(se_cmd);
1957  
1958  	transport_lun_remove_cmd(se_cmd);
1959  	transport_cmd_check_stop_to_fabric(se_cmd);
1960  }
1961  
1962  /**
1963   * target_submit_tmr - lookup unpacked lun and submit uninitialized se_cmd
1964   *                     for TMR CDBs
1965   *
1966   * @se_cmd: command descriptor to submit
1967   * @se_sess: associated se_sess for endpoint
1968   * @sense: pointer to SCSI sense buffer
1969   * @unpacked_lun: unpacked LUN to reference for struct se_lun
1970   * @fabric_tmr_ptr: fabric context for TMR req
1971   * @tm_type: Type of TM request
1972   * @gfp: gfp type for caller
1973   * @tag: referenced task tag for TMR_ABORT_TASK
1974   * @flags: submit cmd flags
1975   *
1976   * Callable from all contexts.
1977   **/
1978  
1979  int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess,
1980  		unsigned char *sense, u64 unpacked_lun,
1981  		void *fabric_tmr_ptr, unsigned char tm_type,
1982  		gfp_t gfp, u64 tag, int flags)
1983  {
1984  	struct se_portal_group *se_tpg;
1985  	int ret;
1986  
1987  	se_tpg = se_sess->se_tpg;
1988  	BUG_ON(!se_tpg);
1989  
1990  	__target_init_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
1991  			  0, DMA_NONE, TCM_SIMPLE_TAG, sense, unpacked_lun,
1992  			  se_sess->cmd_cnt);
1993  	/*
1994  	 * FIXME: Currently expect caller to handle se_cmd->se_tmr_req
1995  	 * allocation failure.
1996  	 */
1997  	ret = core_tmr_alloc_req(se_cmd, fabric_tmr_ptr, tm_type, gfp);
1998  	if (ret < 0)
1999  		return -ENOMEM;
2000  
2001  	if (tm_type == TMR_ABORT_TASK)
2002  		se_cmd->se_tmr_req->ref_task_tag = tag;
2003  
2004  	/* See target_submit_cmd for commentary */
2005  	ret = target_get_sess_cmd(se_cmd, flags & TARGET_SCF_ACK_KREF);
2006  	if (ret) {
2007  		core_tmr_release_req(se_cmd->se_tmr_req);
2008  		return ret;
2009  	}
2010  
2011  	ret = transport_lookup_tmr_lun(se_cmd);
2012  	if (ret)
2013  		goto failure;
2014  
2015  	transport_generic_handle_tmr(se_cmd);
2016  	return 0;
2017  
2018  	/*
2019  	 * For callback during failure handling, push this work off
2020  	 * to process context with TMR_LUN_DOES_NOT_EXIST status.
2021  	 */
2022  failure:
2023  	INIT_WORK(&se_cmd->work, target_complete_tmr_failure);
2024  	schedule_work(&se_cmd->work);
2025  	return 0;
2026  }
2027  EXPORT_SYMBOL(target_submit_tmr);
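/*
 * Illustrative only, hypothetical fabric TMR path (not part of this file):
 * aborting an outstanding command by its tag might look like:
 *
 *	rc = target_submit_tmr(tmr_se_cmd, se_sess, sense_buf, unpacked_lun,
 *			       fabric_tmr_ctx, TMR_ABORT_TASK, GFP_KERNEL,
 *			       ref_task_tag, TARGET_SCF_ACK_KREF);
 *	if (rc)
 *		(fail the TMR back to the initiator)
 *
 * The TMR response is later delivered through the fabric's ->queue_tm_rsp().
 */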
2028  
2029  /*
2030   * Handle SAM-esque emulation for generic transport request failures.
2031   */
2032  void transport_generic_request_failure(struct se_cmd *cmd,
2033  		sense_reason_t sense_reason)
2034  {
2035  	int ret = 0, post_ret;
2036  
2037  	pr_debug("-----[ Storage Engine Exception; sense_reason %d\n",
2038  		 sense_reason);
2039  	target_show_cmd("-----[ ", cmd);
2040  
2041  	/*
2042  	 * For SAM Task Attribute emulation for failed struct se_cmd
2043  	 */
2044  	transport_complete_task_attr(cmd);
2045  
2046  	if (cmd->transport_complete_callback)
2047  		cmd->transport_complete_callback(cmd, false, &post_ret);
2048  
2049  	if (cmd->transport_state & CMD_T_ABORTED) {
2050  		INIT_WORK(&cmd->work, target_abort_work);
2051  		queue_work(target_completion_wq, &cmd->work);
2052  		return;
2053  	}
2054  
2055  	switch (sense_reason) {
2056  	case TCM_NON_EXISTENT_LUN:
2057  	case TCM_UNSUPPORTED_SCSI_OPCODE:
2058  	case TCM_INVALID_CDB_FIELD:
2059  	case TCM_INVALID_PARAMETER_LIST:
2060  	case TCM_PARAMETER_LIST_LENGTH_ERROR:
2061  	case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
2062  	case TCM_UNKNOWN_MODE_PAGE:
2063  	case TCM_WRITE_PROTECTED:
2064  	case TCM_ADDRESS_OUT_OF_RANGE:
2065  	case TCM_CHECK_CONDITION_ABORT_CMD:
2066  	case TCM_CHECK_CONDITION_UNIT_ATTENTION:
2067  	case TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED:
2068  	case TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED:
2069  	case TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED:
2070  	case TCM_COPY_TARGET_DEVICE_NOT_REACHABLE:
2071  	case TCM_TOO_MANY_TARGET_DESCS:
2072  	case TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE:
2073  	case TCM_TOO_MANY_SEGMENT_DESCS:
2074  	case TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE:
2075  	case TCM_INVALID_FIELD_IN_COMMAND_IU:
2076  	case TCM_ALUA_TG_PT_STANDBY:
2077  	case TCM_ALUA_TG_PT_UNAVAILABLE:
2078  	case TCM_ALUA_STATE_TRANSITION:
2079  	case TCM_ALUA_OFFLINE:
2080  		break;
2081  	case TCM_OUT_OF_RESOURCES:
2082  		cmd->scsi_status = SAM_STAT_TASK_SET_FULL;
2083  		goto queue_status;
2084  	case TCM_LUN_BUSY:
2085  		cmd->scsi_status = SAM_STAT_BUSY;
2086  		goto queue_status;
2087  	case TCM_RESERVATION_CONFLICT:
2088  		/*
2089  		 * No SENSE Data payload for this case, set SCSI Status
2090  		 * and queue the response to $FABRIC_MOD.
2091  		 *
2092  		 * Uses linux/include/scsi/scsi.h SAM status codes defs
2093  		 */
2094  		cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2095  		/*
2096  		 * For UA Interlock Code 11b, a RESERVATION CONFLICT will
2097  		 * establish a UNIT ATTENTION with PREVIOUS RESERVATION
2098  		 * CONFLICT STATUS.
2099  		 *
2100  		 * See spc4r17, section 7.4.6 Control Mode Page, Table 349
2101  		 */
2102  		if (cmd->se_sess &&
2103  		    cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl
2104  					== TARGET_UA_INTLCK_CTRL_ESTABLISH_UA) {
2105  			target_ua_allocate_lun(cmd->se_sess->se_node_acl,
2106  					       cmd->orig_fe_lun, 0x2C,
2107  					ASCQ_2CH_PREVIOUS_RESERVATION_CONFLICT_STATUS);
2108  		}
2109  
2110  		goto queue_status;
2111  	default:
2112  		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
2113  			cmd->t_task_cdb[0], sense_reason);
2114  		sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
2115  		break;
2116  	}
2117  
2118  	ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0);
2119  	if (ret)
2120  		goto queue_full;
2121  
2122  check_stop:
2123  	transport_lun_remove_cmd(cmd);
2124  	transport_cmd_check_stop_to_fabric(cmd);
2125  	return;
2126  
2127  queue_status:
2128  	trace_target_cmd_complete(cmd);
2129  	ret = cmd->se_tfo->queue_status(cmd);
2130  	if (!ret)
2131  		goto check_stop;
2132  queue_full:
2133  	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2134  }
2135  EXPORT_SYMBOL(transport_generic_request_failure);
2136  
2137  void __target_execute_cmd(struct se_cmd *cmd, bool do_checks)
2138  {
2139  	sense_reason_t ret;
2140  
2141  	if (!cmd->execute_cmd) {
2142  		ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2143  		goto err;
2144  	}
2145  	if (do_checks) {
2146  		/*
2147  		 * Check for an existing UNIT ATTENTION condition after
2148  		 * target_handle_task_attr() has done SAM task attr
2149  		 * checking, and may have already deferred execution
2150  		 * out to target_do_delayed_work() context.
2151  		 */
2152  		ret = target_scsi3_ua_check(cmd);
2153  		if (ret)
2154  			goto err;
2155  
2156  		ret = target_alua_state_check(cmd);
2157  		if (ret)
2158  			goto err;
2159  
2160  		ret = target_check_reservation(cmd);
2161  		if (ret) {
2162  			cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT;
2163  			goto err;
2164  		}
2165  	}
2166  
2167  	ret = cmd->execute_cmd(cmd);
2168  	if (!ret)
2169  		return;
2170  err:
2171  	spin_lock_irq(&cmd->t_state_lock);
2172  	cmd->transport_state &= ~CMD_T_SENT;
2173  	spin_unlock_irq(&cmd->t_state_lock);
2174  
2175  	transport_generic_request_failure(cmd, ret);
2176  }
2177  
2178  static int target_write_prot_action(struct se_cmd *cmd)
2179  {
2180  	u32 sectors;
2181  	/*
2182  	 * Perform WRITE_INSERT of PI using software emulation when backend
2183  	 * device has PI enabled, if the transport has not already generated
2184  	 * PI using hardware WRITE_INSERT offload.
2185  	 */
2186  	switch (cmd->prot_op) {
2187  	case TARGET_PROT_DOUT_INSERT:
2188  		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT))
2189  			sbc_dif_generate(cmd);
2190  		break;
2191  	case TARGET_PROT_DOUT_STRIP:
2192  		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP)
2193  			break;
2194  
2195  		sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size);
2196  		cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2197  					     sectors, 0, cmd->t_prot_sg, 0);
2198  		if (unlikely(cmd->pi_err)) {
2199  			spin_lock_irq(&cmd->t_state_lock);
2200  			cmd->transport_state &= ~CMD_T_SENT;
2201  			spin_unlock_irq(&cmd->t_state_lock);
2202  			transport_generic_request_failure(cmd, cmd->pi_err);
2203  			return -1;
2204  		}
2205  		break;
2206  	default:
2207  		break;
2208  	}
2209  
2210  	return 0;
2211  }
2212  
2213  static bool target_handle_task_attr(struct se_cmd *cmd)
2214  {
2215  	struct se_device *dev = cmd->se_dev;
2216  
2217  	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2218  		return false;
2219  
2220  	cmd->se_cmd_flags |= SCF_TASK_ATTR_SET;
2221  
2222  	/*
2223  	 * Check for the existence of HEAD_OF_QUEUE, and if set return false so
2224  	 * the passed struct se_cmd bypasses the delayed list and executes at once.
2225  	 */
2226  	switch (cmd->sam_task_attr) {
2227  	case TCM_HEAD_TAG:
2228  		atomic_inc_mb(&dev->non_ordered);
2229  		pr_debug("Added HEAD_OF_QUEUE for CDB: 0x%02x\n",
2230  			 cmd->t_task_cdb[0]);
2231  		return false;
2232  	case TCM_ORDERED_TAG:
2233  		atomic_inc_mb(&dev->delayed_cmd_count);
2234  
2235  		pr_debug("Added ORDERED for CDB: 0x%02x to ordered list\n",
2236  			 cmd->t_task_cdb[0]);
2237  		break;
2238  	default:
2239  		/*
2240  		 * For SIMPLE and UNTAGGED Task Attribute commands
2241  		 */
2242  		atomic_inc_mb(&dev->non_ordered);
2243  
2244  		if (atomic_read(&dev->delayed_cmd_count) == 0)
2245  			return false;
2246  		break;
2247  	}
2248  
2249  	if (cmd->sam_task_attr != TCM_ORDERED_TAG) {
2250  		atomic_inc_mb(&dev->delayed_cmd_count);
2251  		/*
2252  		 * We will account for this when we dequeue from the delayed
2253  		 * list.
2254  		 */
2255  		atomic_dec_mb(&dev->non_ordered);
2256  	}
2257  
2258  	spin_lock_irq(&cmd->t_state_lock);
2259  	cmd->transport_state &= ~CMD_T_SENT;
2260  	spin_unlock_irq(&cmd->t_state_lock);
2261  
2262  	spin_lock(&dev->delayed_cmd_lock);
2263  	list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list);
2264  	spin_unlock(&dev->delayed_cmd_lock);
2265  
2266  	pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to delayed CMD list\n",
2267  		cmd->t_task_cdb[0], cmd->sam_task_attr);
2268  	/*
2269  	 * We may have no non ordered cmds when this function started or we
2270  	 * could have raced with the last simple/head cmd completing, so kick
2271  	 * the delayed handler here.
2272  	 */
2273  	schedule_work(&dev->delayed_cmd_work);
2274  	return true;
2275  }
2276  
2277  void target_execute_cmd(struct se_cmd *cmd)
2278  {
2279  	/*
2280  	 * Determine if frontend context caller is requesting the stopping of
2281  	 * this command for frontend exceptions.
2282  	 *
2283  	 * If the received CDB has already been aborted stop processing it here.
2284  	 */
2285  	if (target_cmd_interrupted(cmd))
2286  		return;
2287  
2288  	spin_lock_irq(&cmd->t_state_lock);
2289  	cmd->t_state = TRANSPORT_PROCESSING;
2290  	cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT;
2291  	spin_unlock_irq(&cmd->t_state_lock);
2292  
2293  	if (target_write_prot_action(cmd))
2294  		return;
2295  
2296  	if (target_handle_task_attr(cmd))
2297  		return;
2298  
2299  	__target_execute_cmd(cmd, true);
2300  }
2301  EXPORT_SYMBOL(target_execute_cmd);
2302  
2303  /*
2304   * Process all commands up to the last received ORDERED task attribute which
2305   * requires another blocking boundary
2306   */
2307  void target_do_delayed_work(struct work_struct *work)
2308  {
2309  	struct se_device *dev = container_of(work, struct se_device,
2310  					     delayed_cmd_work);
2311  
2312  	spin_lock(&dev->delayed_cmd_lock);
2313  	while (!dev->ordered_sync_in_progress) {
2314  		struct se_cmd *cmd;
2315  
2316  		if (list_empty(&dev->delayed_cmd_list))
2317  			break;
2318  
2319  		cmd = list_entry(dev->delayed_cmd_list.next,
2320  				 struct se_cmd, se_delayed_node);
2321  
2322  		if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2323  			/*
2324  			 * Check if we started with:
2325  			 * [ordered] [simple] [ordered]
2326  			 * and we are now at the last ordered so we have to wait
2327  			 * for the simple cmd.
2328  			 */
2329  			if (atomic_read(&dev->non_ordered) > 0)
2330  				break;
2331  
2332  			dev->ordered_sync_in_progress = true;
2333  		}
2334  
2335  		list_del(&cmd->se_delayed_node);
2336  		atomic_dec_mb(&dev->delayed_cmd_count);
2337  		spin_unlock(&dev->delayed_cmd_lock);
2338  
2339  		if (cmd->sam_task_attr != TCM_ORDERED_TAG)
2340  			atomic_inc_mb(&dev->non_ordered);
2341  
2342  		cmd->transport_state |= CMD_T_SENT;
2343  
2344  		__target_execute_cmd(cmd, true);
2345  
2346  		spin_lock(&dev->delayed_cmd_lock);
2347  	}
2348  	spin_unlock(&dev->delayed_cmd_lock);
2349  }
2350  
2351  /*
2352   * Called from I/O completion to determine which dormant/delayed
2353   * and ordered cmds need to have their tasks added to the execution queue.
2354   */
2355  static void transport_complete_task_attr(struct se_cmd *cmd)
2356  {
2357  	struct se_device *dev = cmd->se_dev;
2358  
2359  	if (dev->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
2360  		return;
2361  
2362  	if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET))
2363  		goto restart;
2364  
2365  	if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
2366  		atomic_dec_mb(&dev->non_ordered);
2367  		dev->dev_cur_ordered_id++;
2368  	} else if (cmd->sam_task_attr == TCM_HEAD_TAG) {
2369  		atomic_dec_mb(&dev->non_ordered);
2370  		dev->dev_cur_ordered_id++;
2371  		pr_debug("Incremented dev_cur_ordered_id: %u for HEAD_OF_QUEUE\n",
2372  			 dev->dev_cur_ordered_id);
2373  	} else if (cmd->sam_task_attr == TCM_ORDERED_TAG) {
2374  		spin_lock(&dev->delayed_cmd_lock);
2375  		dev->ordered_sync_in_progress = false;
2376  		spin_unlock(&dev->delayed_cmd_lock);
2377  
2378  		dev->dev_cur_ordered_id++;
2379  		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED\n",
2380  			 dev->dev_cur_ordered_id);
2381  	}
2382  	cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET;
2383  
2384  restart:
2385  	if (atomic_read(&dev->delayed_cmd_count) > 0)
2386  		schedule_work(&dev->delayed_cmd_work);
2387  }
2388  
2389  static void transport_complete_qf(struct se_cmd *cmd)
2390  {
2391  	int ret = 0;
2392  
2393  	transport_complete_task_attr(cmd);
2394  	/*
2395  	 * If a fabric driver ->write_pending() or ->queue_data_in() callback
2396  	 * has returned neither -ENOMEM nor -EAGAIN, assume it's fatal and
2397  	 * the same callbacks should not be retried.  Return CHECK_CONDITION
2398  	 * if a scsi_status is not already set.
2399  	 *
2400  	 * If a fabric driver ->queue_status() has returned non zero, always
2401  	 * keep retrying no matter what..
2402  	 */
2403  	if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) {
2404  		if (cmd->scsi_status)
2405  			goto queue_status;
2406  
2407  		translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE);
2408  		goto queue_status;
2409  	}
2410  
2411  	/*
2412  	 * Check if we need to send a sense buffer from
2413  	 * the struct se_cmd in question. We do NOT want
2414  	 * to take this path if the IO has been marked as
2415  	 * needing to be treated like a "normal read". This
2416  	 * is the case if it's a tape read, and either the
2417  	 * FM, EOM, or ILI bits are set, but there is no
2418  	 * sense data.
2419  	 */
2420  	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2421  	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE)
2422  		goto queue_status;
2423  
2424  	switch (cmd->data_direction) {
2425  	case DMA_FROM_DEVICE:
2426  		/* queue status if not treating this as a normal read */
2427  		if (cmd->scsi_status &&
2428  		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2429  			goto queue_status;
2430  
2431  		trace_target_cmd_complete(cmd);
2432  		ret = cmd->se_tfo->queue_data_in(cmd);
2433  		break;
2434  	case DMA_TO_DEVICE:
2435  		if (cmd->se_cmd_flags & SCF_BIDI) {
2436  			ret = cmd->se_tfo->queue_data_in(cmd);
2437  			break;
2438  		}
2439  		fallthrough;
2440  	case DMA_NONE:
2441  queue_status:
2442  		trace_target_cmd_complete(cmd);
2443  		ret = cmd->se_tfo->queue_status(cmd);
2444  		break;
2445  	default:
2446  		break;
2447  	}
2448  
2449  	if (ret < 0) {
2450  		transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2451  		return;
2452  	}
2453  	transport_lun_remove_cmd(cmd);
2454  	transport_cmd_check_stop_to_fabric(cmd);
2455  }
2456  
2457  static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
2458  					int err, bool write_pending)
2459  {
2460  	/*
2461  	 * -EAGAIN or -ENOMEM signals retry of ->write_pending() and/or
2462  	 * ->queue_data_in() callbacks from new process context.
2463  	 *
2464  	 * Otherwise for other errors, transport_complete_qf() will send
2465  	 * CHECK_CONDITION via ->queue_status() instead of attempting to
2466  	 * retry associated fabric driver data-transfer callbacks.
2467  	 */
2468  	if (err == -EAGAIN || err == -ENOMEM) {
2469  		cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
2470  						 TRANSPORT_COMPLETE_QF_OK;
2471  	} else {
2472  		pr_warn_ratelimited("Got unknown fabric queue status: %d\n", err);
2473  		cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
2474  	}
2475  
2476  	spin_lock_irq(&dev->qf_cmd_lock);
2477  	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
2478  	atomic_inc_mb(&dev->dev_qf_count);
2479  	spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
2480  
2481  	schedule_work(&cmd->se_dev->qf_work_queue);
2482  }
2483  
2484  static bool target_read_prot_action(struct se_cmd *cmd)
2485  {
2486  	switch (cmd->prot_op) {
2487  	case TARGET_PROT_DIN_STRIP:
2488  		if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) {
2489  			u32 sectors = cmd->data_length >>
2490  				  ilog2(cmd->se_dev->dev_attrib.block_size);
2491  
2492  			cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba,
2493  						     sectors, 0, cmd->t_prot_sg,
2494  						     0);
2495  			if (cmd->pi_err)
2496  				return true;
2497  		}
2498  		break;
2499  	case TARGET_PROT_DIN_INSERT:
2500  		if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT)
2501  			break;
2502  
2503  		sbc_dif_generate(cmd);
2504  		break;
2505  	default:
2506  		break;
2507  	}
2508  
2509  	return false;
2510  }
2511  
2512  static void target_complete_ok_work(struct work_struct *work)
2513  {
2514  	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
2515  	int ret;
2516  
2517  	/*
2518  	 * Check if we need to move delayed/dormant tasks from cmds on the
2519  	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task
2520  	 * Attribute.
2521  	 */
2522  	transport_complete_task_attr(cmd);
2523  
2524  	/*
2525  	 * Check to schedule QUEUE_FULL work, or execute an existing
2526  	 * cmd->transport_qf_callback()
2527  	 */
2528  	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)
2529  		schedule_work(&cmd->se_dev->qf_work_queue);
2530  
2531  	/*
2532  	 * Check if we need to send a sense buffer from
2533  	 * the struct se_cmd in question. We do NOT want
2534  	 * to take this path if the IO has been marked as
2535  	 * needing to be treated like a "normal read". This
2536  	 * is the case if it's a tape read, and either the
2537  	 * FM, EOM, or ILI bits are set, but there is no
2538  	 * sense data.
2539  	 */
2540  	if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) &&
2541  	    cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
2542  		WARN_ON(!cmd->scsi_status);
2543  		ret = transport_send_check_condition_and_sense(
2544  					cmd, 0, 1);
2545  		if (ret)
2546  			goto queue_full;
2547  
2548  		transport_lun_remove_cmd(cmd);
2549  		transport_cmd_check_stop_to_fabric(cmd);
2550  		return;
2551  	}
2552  	/*
2553  	 * Check for a callback, used by, amongst other things,
2554  	 * XDWRITE_READ_10 and COMPARE_AND_WRITE emulation.
2555  	 */
2556  	if (cmd->transport_complete_callback) {
2557  		sense_reason_t rc;
2558  		bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE);
2559  		bool zero_dl = !(cmd->data_length);
2560  		int post_ret = 0;
2561  
2562  		rc = cmd->transport_complete_callback(cmd, true, &post_ret);
2563  		if (!rc && !post_ret) {
2564  			if (caw && zero_dl)
2565  				goto queue_rsp;
2566  
2567  			return;
2568  		} else if (rc) {
2569  			ret = transport_send_check_condition_and_sense(cmd,
2570  						rc, 0);
2571  			if (ret)
2572  				goto queue_full;
2573  
2574  			transport_lun_remove_cmd(cmd);
2575  			transport_cmd_check_stop_to_fabric(cmd);
2576  			return;
2577  		}
2578  	}
2579  
2580  queue_rsp:
2581  	switch (cmd->data_direction) {
2582  	case DMA_FROM_DEVICE:
2583  		/*
2584  		 * if this is a READ-type IO, but SCSI status
2585  		 * is set, then skip returning data and just
2586  		 * return the status -- unless this IO is marked
2587  		 * as needing to be treated as a normal read,
2588  		 * in which case we want to go ahead and return
2589  		 * the data. This happens, for example, for tape
2590  		 * reads with the FM, EOM, or ILI bits set, with
2591  		 * no sense data.
2592  		 */
2593  		if (cmd->scsi_status &&
2594  		    !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL))
2595  			goto queue_status;
2596  
2597  		atomic_long_add(cmd->data_length,
2598  				&cmd->se_lun->lun_stats.tx_data_octets);
2599  		/*
2600  		 * Perform READ_STRIP of PI using software emulation when
2601  		 * backend had PI enabled, if the transport will not be
2602  		 * performing hardware READ_STRIP offload.
2603  		 */
2604  		if (target_read_prot_action(cmd)) {
2605  			ret = transport_send_check_condition_and_sense(cmd,
2606  						cmd->pi_err, 0);
2607  			if (ret)
2608  				goto queue_full;
2609  
2610  			transport_lun_remove_cmd(cmd);
2611  			transport_cmd_check_stop_to_fabric(cmd);
2612  			return;
2613  		}
2614  
2615  		trace_target_cmd_complete(cmd);
2616  		ret = cmd->se_tfo->queue_data_in(cmd);
2617  		if (ret)
2618  			goto queue_full;
2619  		break;
2620  	case DMA_TO_DEVICE:
2621  		atomic_long_add(cmd->data_length,
2622  				&cmd->se_lun->lun_stats.rx_data_octets);
2623  		/*
2624  		 * Check if we need to send READ payload for BIDI-COMMAND
2625  		 */
2626  		if (cmd->se_cmd_flags & SCF_BIDI) {
2627  			atomic_long_add(cmd->data_length,
2628  					&cmd->se_lun->lun_stats.tx_data_octets);
2629  			ret = cmd->se_tfo->queue_data_in(cmd);
2630  			if (ret)
2631  				goto queue_full;
2632  			break;
2633  		}
2634  		fallthrough;
2635  	case DMA_NONE:
2636  queue_status:
2637  		trace_target_cmd_complete(cmd);
2638  		ret = cmd->se_tfo->queue_status(cmd);
2639  		if (ret)
2640  			goto queue_full;
2641  		break;
2642  	default:
2643  		break;
2644  	}
2645  
2646  	transport_lun_remove_cmd(cmd);
2647  	transport_cmd_check_stop_to_fabric(cmd);
2648  	return;
2649  
2650  queue_full:
2651  	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
2652  		" data_direction: %d\n", cmd, cmd->data_direction);
2653  
2654  	transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
2655  }
2656  
2657  void target_free_sgl(struct scatterlist *sgl, int nents)
2658  {
2659  	sgl_free_n_order(sgl, nents, 0);
2660  }
2661  EXPORT_SYMBOL(target_free_sgl);
2662  
2663  static inline void transport_reset_sgl_orig(struct se_cmd *cmd)
2664  {
2665  	/*
2666  	 * Check for saved t_data_sg that may be used for COMPARE_AND_WRITE
2667  	 * emulation, and free + reset pointers if necessary..
2668  	 */
2669  	if (!cmd->t_data_sg_orig)
2670  		return;
2671  
2672  	kfree(cmd->t_data_sg);
2673  	cmd->t_data_sg = cmd->t_data_sg_orig;
2674  	cmd->t_data_sg_orig = NULL;
2675  	cmd->t_data_nents = cmd->t_data_nents_orig;
2676  	cmd->t_data_nents_orig = 0;
2677  }
2678  
2679  static inline void transport_free_pages(struct se_cmd *cmd)
2680  {
2681  	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2682  		target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents);
2683  		cmd->t_prot_sg = NULL;
2684  		cmd->t_prot_nents = 0;
2685  	}
2686  
2687  	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) {
2688  		/*
2689  		 * Release special case READ buffer payload required for
2690  		 * SG_TO_MEM_NOALLOC to function with COMPARE_AND_WRITE
2691  		 */
2692  		if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) {
2693  			target_free_sgl(cmd->t_bidi_data_sg,
2694  					   cmd->t_bidi_data_nents);
2695  			cmd->t_bidi_data_sg = NULL;
2696  			cmd->t_bidi_data_nents = 0;
2697  		}
2698  		transport_reset_sgl_orig(cmd);
2699  		return;
2700  	}
2701  	transport_reset_sgl_orig(cmd);
2702  
2703  	target_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
2704  	cmd->t_data_sg = NULL;
2705  	cmd->t_data_nents = 0;
2706  
2707  	target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
2708  	cmd->t_bidi_data_sg = NULL;
2709  	cmd->t_bidi_data_nents = 0;
2710  }
2711  
2712  void *transport_kmap_data_sg(struct se_cmd *cmd)
2713  {
2714  	struct scatterlist *sg = cmd->t_data_sg;
2715  	struct page **pages;
2716  	int i;
2717  
2718  	/*
2719  	 * We need to take into account a possible offset here for fabrics like
2720  	 * tcm_loop who may be using a contig buffer from the SCSI midlayer for
2721  	 * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd()
2722  	 */
2723  	if (!cmd->t_data_nents)
2724  		return NULL;
2725  
2726  	BUG_ON(!sg);
2727  	if (cmd->t_data_nents == 1)
2728  		return kmap(sg_page(sg)) + sg->offset;
2729  
2730  	/* >1 page. use vmap */
2731  	pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
2732  	if (!pages)
2733  		return NULL;
2734  
2735  	/* convert sg[] to pages[] */
2736  	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) {
2737  		pages[i] = sg_page(sg);
2738  	}
2739  
2740  	cmd->t_data_vmap = vmap(pages, cmd->t_data_nents,  VM_MAP, PAGE_KERNEL);
2741  	kfree(pages);
2742  	if (!cmd->t_data_vmap)
2743  		return NULL;
2744  
2745  	return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
2746  }
2747  EXPORT_SYMBOL(transport_kmap_data_sg);
2748  
2749  void transport_kunmap_data_sg(struct se_cmd *cmd)
2750  {
2751  	if (!cmd->t_data_nents) {
2752  		return;
2753  	} else if (cmd->t_data_nents == 1) {
2754  		kunmap(sg_page(cmd->t_data_sg));
2755  		return;
2756  	}
2757  
2758  	vunmap(cmd->t_data_vmap);
2759  	cmd->t_data_vmap = NULL;
2760  }
2761  EXPORT_SYMBOL(transport_kunmap_data_sg);
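/*
 * Illustrative only, not part of this file: emulation code that needs a
 * linear view of the data buffer pairs the two helpers above, e.g.:
 *
 *	unsigned char *buf = transport_kmap_data_sg(cmd);
 *
 *	if (!buf)
 *		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 *	(read or fill at most cmd->data_length bytes of buf)
 *	transport_kunmap_data_sg(cmd);
 */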
2762  
2763  int
2764  target_alloc_sgl(struct scatterlist **sgl, unsigned int *nents, u32 length,
2765  		 bool zero_page, bool chainable)
2766  {
2767  	gfp_t gfp = GFP_KERNEL | (zero_page ? __GFP_ZERO : 0);
2768  
2769  	*sgl = sgl_alloc_order(length, 0, chainable, gfp, nents);
2770  	return *sgl ? 0 : -ENOMEM;
2771  }
2772  EXPORT_SYMBOL(target_alloc_sgl);
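/*
 * Illustrative only, not part of this file: callers that need a scratch
 * scatterlist outside the normal allocation in transport_generic_new_cmd()
 * pair target_alloc_sgl() with target_free_sgl(), e.g.:
 *
 *	struct scatterlist *sgl;
 *	unsigned int nents;
 *
 *	if (target_alloc_sgl(&sgl, &nents, len, true, false) < 0)
 *		return TCM_OUT_OF_RESOURCES;
 *	(use sgl/nents)
 *	target_free_sgl(sgl, nents);
 */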
2773  
2774  /*
2775   * Allocate any required resources to execute the command.  For writes we
2776   * might not have the payload yet, so notify the fabric via a call to
2777   * ->write_pending instead. Otherwise place it on the execution queue.
2778   */
2779  sense_reason_t
2780  transport_generic_new_cmd(struct se_cmd *cmd)
2781  {
2782  	unsigned long flags;
2783  	int ret = 0;
2784  	bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB);
2785  
2786  	if (cmd->prot_op != TARGET_PROT_NORMAL &&
2787  	    !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) {
2788  		ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents,
2789  				       cmd->prot_length, true, false);
2790  		if (ret < 0)
2791  			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2792  	}
2793  
2794  	/*
2795  	 * Determine if the TCM fabric module has already allocated physical
2796  	 * memory, and is directly calling transport_generic_map_mem_to_cmd()
2797  	 * beforehand.
2798  	 */
2799  	if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) &&
2800  	    cmd->data_length) {
2801  
2802  		if ((cmd->se_cmd_flags & SCF_BIDI) ||
2803  		    (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) {
2804  			u32 bidi_length;
2805  
2806  			if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)
2807  				bidi_length = cmd->t_task_nolb *
2808  					      cmd->se_dev->dev_attrib.block_size;
2809  			else
2810  				bidi_length = cmd->data_length;
2811  
2812  			ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2813  					       &cmd->t_bidi_data_nents,
2814  					       bidi_length, zero_flag, false);
2815  			if (ret < 0)
2816  				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2817  		}
2818  
2819  		ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents,
2820  				       cmd->data_length, zero_flag, false);
2821  		if (ret < 0)
2822  			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2823  	} else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) &&
2824  		    cmd->data_length) {
2825  		/*
2826  		 * Special case for COMPARE_AND_WRITE with fabrics
2827  		 * using SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC.
2828  		 */
2829  		u32 caw_length = cmd->t_task_nolb *
2830  				 cmd->se_dev->dev_attrib.block_size;
2831  
2832  		ret = target_alloc_sgl(&cmd->t_bidi_data_sg,
2833  				       &cmd->t_bidi_data_nents,
2834  				       caw_length, zero_flag, false);
2835  		if (ret < 0)
2836  			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
2837  	}
2838  	/*
2839  	 * If this command is not a write we can execute it right here,
2840  	 * for write buffers we need to notify the fabric driver first
2841  	 * and let it call back once the write buffers are ready.
2842  	 */
2843  	target_add_to_state_list(cmd);
2844  	if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2845  		target_execute_cmd(cmd);
2846  		return 0;
2847  	}
2848  
2849  	spin_lock_irqsave(&cmd->t_state_lock, flags);
2850  	cmd->t_state = TRANSPORT_WRITE_PENDING;
2851  	/*
2852  	 * Determine if frontend context caller is requesting the stopping of
2853  	 * this command for frontend exceptions.
2854  	 */
2855  	if (cmd->transport_state & CMD_T_STOP &&
2856  	    !cmd->se_tfo->write_pending_must_be_called) {
2857  		pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
2858  			 __func__, __LINE__, cmd->tag);
2859  
2860  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2861  
2862  		complete_all(&cmd->t_transport_stop_comp);
2863  		return 0;
2864  	}
2865  	cmd->transport_state &= ~CMD_T_ACTIVE;
2866  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2867  
2868  	ret = cmd->se_tfo->write_pending(cmd);
2869  	if (ret)
2870  		goto queue_full;
2871  
2872  	return 0;
2873  
2874  queue_full:
2875  	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
2876  	transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2877  	return 0;
2878  }
2879  EXPORT_SYMBOL(transport_generic_new_cmd);
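/*
 * Illustrative only, hypothetical fabric flow (not part of this file): for
 * WRITEs, transport_generic_new_cmd() above stops at ->write_pending().  The
 * fabric starts the Data-Out transfer from that callback and, once the
 * payload has landed in cmd->t_data_sg, re-enters the core from its
 * completion handler:
 *
 *	static int my_fabric_write_pending(struct se_cmd *se_cmd)
 *	{
 *		(kick off the Data-Out transfer for se_cmd)
 *		return 0;
 *	}
 *
 *	(and from the Data-Out completion path:)
 *	target_execute_cmd(se_cmd);
 */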
2880  
2881  static void transport_write_pending_qf(struct se_cmd *cmd)
2882  {
2883  	unsigned long flags;
2884  	int ret;
2885  	bool stop;
2886  
2887  	spin_lock_irqsave(&cmd->t_state_lock, flags);
2888  	stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED));
2889  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2890  
2891  	if (stop) {
2892  		pr_debug("%s:%d CMD_T_STOP|CMD_T_ABORTED for ITT: 0x%08llx\n",
2893  			__func__, __LINE__, cmd->tag);
2894  		complete_all(&cmd->t_transport_stop_comp);
2895  		return;
2896  	}
2897  
2898  	ret = cmd->se_tfo->write_pending(cmd);
2899  	if (ret) {
2900  		pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n",
2901  			 cmd);
2902  		transport_handle_queue_full(cmd, cmd->se_dev, ret, true);
2903  	}
2904  }
2905  
2906  static bool
2907  __transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
2908  			   unsigned long *flags);
2909  
2910  static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
2911  {
2912  	unsigned long flags;
2913  
2914  	spin_lock_irqsave(&cmd->t_state_lock, flags);
2915  	__transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
2916  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
2917  }
2918  
2919  /*
2920   * Call target_put_sess_cmd() and wait until target_release_cmd_kref(@cmd) has
2921   * finished.
2922   */
2923  void target_put_cmd_and_wait(struct se_cmd *cmd)
2924  {
2925  	DECLARE_COMPLETION_ONSTACK(compl);
2926  
2927  	WARN_ON_ONCE(cmd->abrt_compl);
2928  	cmd->abrt_compl = &compl;
2929  	target_put_sess_cmd(cmd);
2930  	wait_for_completion(&compl);
2931  }
2932  
2933  /*
2934   * This function is called by frontend drivers after processing of a command
2935   * has finished.
2936   *
2937   * The protocol for ensuring that either the regular frontend command
2938   * processing flow or target_handle_abort() code drops one reference is as
2939   * follows:
2940   * - Calling .queue_data_in(), .queue_status() or .queue_tm_rsp() will cause
2941   *   the frontend driver to call this function synchronously or asynchronously.
2942   *   That will cause one reference to be dropped.
2943   * - During regular command processing the target core sets CMD_T_COMPLETE
2944   *   before invoking one of the .queue_*() functions.
2945   * - The code that aborts commands skips commands and TMFs for which
2946   *   CMD_T_COMPLETE has been set.
2947   * - CMD_T_ABORTED is set atomically after the CMD_T_COMPLETE check for
2948   *   commands that will be aborted.
2949   * - If the CMD_T_ABORTED flag is set but CMD_T_TAS has not been set
2950   *   transport_generic_free_cmd() skips its call to target_put_sess_cmd().
2951   * - For aborted commands for which CMD_T_TAS has been set .queue_status() will
2952   *   be called and will drop a reference.
2953   * - For aborted commands for which CMD_T_TAS has not been set .aborted_task()
2954   *   will be called. target_handle_abort() will drop the final reference.
2955   */
2956  int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
2957  {
2958  	DECLARE_COMPLETION_ONSTACK(compl);
2959  	int ret = 0;
2960  	bool aborted = false, tas = false;
2961  
2962  	if (wait_for_tasks)
2963  		target_wait_free_cmd(cmd, &aborted, &tas);
2964  
2965  	if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) {
2966  		/*
2967  		 * Handle WRITE failure case where transport_generic_new_cmd()
2968  		 * has already added se_cmd to state_list, but fabric has
2969  		 * failed command before I/O submission.
2970  		 */
2971  		if (cmd->state_active)
2972  			target_remove_from_state_list(cmd);
2973  
2974  		if (cmd->se_lun)
2975  			transport_lun_remove_cmd(cmd);
2976  	}
2977  	if (aborted)
2978  		cmd->free_compl = &compl;
2979  	ret = target_put_sess_cmd(cmd);
2980  	if (aborted) {
2981  		pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
2982  		wait_for_completion(&compl);
2983  		ret = 1;
2984  	}
2985  	return ret;
2986  }
2987  EXPORT_SYMBOL(transport_generic_free_cmd);
2988  
2989  /**
2990   * target_get_sess_cmd - Verify the session is accepting cmds and take ref
2991   * @se_cmd:	command descriptor to add
2992   * @ack_kref:	Signal that fabric will perform an ack target_put_sess_cmd()
2993   */
2994  int target_get_sess_cmd(struct se_cmd *se_cmd, bool ack_kref)
2995  {
2996  	int ret = 0;
2997  
2998  	/*
2999  	 * Add a second kref if the fabric caller is expecting to handle
3000  	 * fabric acknowledgement that requires two target_put_sess_cmd()
3001  	 * invocations before se_cmd descriptor release.
3002  	 */
3003  	if (ack_kref) {
3004  		kref_get(&se_cmd->cmd_kref);
3005  		se_cmd->se_cmd_flags |= SCF_ACK_KREF;
3006  	}
3007  
3008  	/*
3009  	 * Users like xcopy do not use counters since they never do a stop
3010  	 * and wait.
3011  	 */
3012  	if (se_cmd->cmd_cnt) {
3013  		if (!percpu_ref_tryget_live(&se_cmd->cmd_cnt->refcnt))
3014  			ret = -ESHUTDOWN;
3015  	}
3016  	if (ret && ack_kref)
3017  		target_put_sess_cmd(se_cmd);
3018  
3019  	return ret;
3020  }
3021  EXPORT_SYMBOL(target_get_sess_cmd);
3022  
3023  static void target_free_cmd_mem(struct se_cmd *cmd)
3024  {
3025  	transport_free_pages(cmd);
3026  
3027  	if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
3028  		core_tmr_release_req(cmd->se_tmr_req);
3029  	if (cmd->t_task_cdb != cmd->__t_task_cdb)
3030  		kfree(cmd->t_task_cdb);
3031  }
3032  
3033  static void target_release_cmd_kref(struct kref *kref)
3034  {
3035  	struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
3036  	struct target_cmd_counter *cmd_cnt = se_cmd->cmd_cnt;
3037  	struct completion *free_compl = se_cmd->free_compl;
3038  	struct completion *abrt_compl = se_cmd->abrt_compl;
3039  
3040  	target_free_cmd_mem(se_cmd);
3041  	se_cmd->se_tfo->release_cmd(se_cmd);
3042  	if (free_compl)
3043  		complete(free_compl);
3044  	if (abrt_compl)
3045  		complete(abrt_compl);
3046  
3047  	if (cmd_cnt)
3048  		percpu_ref_put(&cmd_cnt->refcnt);
3049  }
3050  
3051  /**
3052   * target_put_sess_cmd - decrease the command reference count
3053   * @se_cmd:	command to drop a reference from
3054   *
3055   * Returns 1 if and only if this target_put_sess_cmd() call caused the
3056   * refcount to drop to zero. Returns zero otherwise.
3057   */
3058  int target_put_sess_cmd(struct se_cmd *se_cmd)
3059  {
3060  	return kref_put(&se_cmd->cmd_kref, target_release_cmd_kref);
3061  }
3062  EXPORT_SYMBOL(target_put_sess_cmd);
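/*
 * Illustrative only, hypothetical fabric acknowledgement path (not part of
 * this file): a fabric that submitted with TARGET_SCF_ACK_KREF keeps the
 * extra reference taken in target_get_sess_cmd() until the initiator
 * acknowledges the response, then drops it:
 *
 *	static void my_fabric_handle_ack(struct my_fabric_cmd *fcmd)
 *	{
 *		target_put_sess_cmd(&fcmd->se_cmd);
 *	}
 *
 * The other reference is dropped by transport_generic_free_cmd() from the
 * fabric's normal completion path.
 */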
3063  
3064  static const char *data_dir_name(enum dma_data_direction d)
3065  {
3066  	switch (d) {
3067  	case DMA_BIDIRECTIONAL:	return "BIDI";
3068  	case DMA_TO_DEVICE:	return "WRITE";
3069  	case DMA_FROM_DEVICE:	return "READ";
3070  	case DMA_NONE:		return "NONE";
3071  	}
3072  
3073  	return "(?)";
3074  }
3075  
3076  static const char *cmd_state_name(enum transport_state_table t)
3077  {
3078  	switch (t) {
3079  	case TRANSPORT_NO_STATE:	return "NO_STATE";
3080  	case TRANSPORT_NEW_CMD:		return "NEW_CMD";
3081  	case TRANSPORT_WRITE_PENDING:	return "WRITE_PENDING";
3082  	case TRANSPORT_PROCESSING:	return "PROCESSING";
3083  	case TRANSPORT_COMPLETE:	return "COMPLETE";
3084  	case TRANSPORT_ISTATE_PROCESSING:
3085  					return "ISTATE_PROCESSING";
3086  	case TRANSPORT_COMPLETE_QF_WP:	return "COMPLETE_QF_WP";
3087  	case TRANSPORT_COMPLETE_QF_OK:	return "COMPLETE_QF_OK";
3088  	case TRANSPORT_COMPLETE_QF_ERR:	return "COMPLETE_QF_ERR";
3089  	}
3090  
3091  	return "(?)";
3092  }
3093  
3094  static void target_append_str(char **str, const char *txt)
3095  {
3096  	char *prev = *str;
3097  
3098  	*str = *str ? kasprintf(GFP_ATOMIC, "%s,%s", *str, txt) :
3099  		kstrdup(txt, GFP_ATOMIC);
3100  	kfree(prev);
3101  }
3102  
3103  /*
3104   * Convert a transport state bitmask into a string. The caller is
3105   * responsible for freeing the returned pointer.
3106   */
3107  static char *target_ts_to_str(u32 ts)
3108  {
3109  	char *str = NULL;
3110  
3111  	if (ts & CMD_T_ABORTED)
3112  		target_append_str(&str, "aborted");
3113  	if (ts & CMD_T_ACTIVE)
3114  		target_append_str(&str, "active");
3115  	if (ts & CMD_T_COMPLETE)
3116  		target_append_str(&str, "complete");
3117  	if (ts & CMD_T_SENT)
3118  		target_append_str(&str, "sent");
3119  	if (ts & CMD_T_STOP)
3120  		target_append_str(&str, "stop");
3121  	if (ts & CMD_T_FABRIC_STOP)
3122  		target_append_str(&str, "fabric_stop");
3123  
3124  	return str;
3125  }
3126  
3127  static const char *target_tmf_name(enum tcm_tmreq_table tmf)
3128  {
3129  	switch (tmf) {
3130  	case TMR_ABORT_TASK:		return "ABORT_TASK";
3131  	case TMR_ABORT_TASK_SET:	return "ABORT_TASK_SET";
3132  	case TMR_CLEAR_ACA:		return "CLEAR_ACA";
3133  	case TMR_CLEAR_TASK_SET:	return "CLEAR_TASK_SET";
3134  	case TMR_LUN_RESET:		return "LUN_RESET";
3135  	case TMR_TARGET_WARM_RESET:	return "TARGET_WARM_RESET";
3136  	case TMR_TARGET_COLD_RESET:	return "TARGET_COLD_RESET";
3137  	case TMR_LUN_RESET_PRO:		return "LUN_RESET_PRO";
3138  	case TMR_UNKNOWN:		break;
3139  	}
3140  	return "(?)";
3141  }
3142  
3143  void target_show_cmd(const char *pfx, struct se_cmd *cmd)
3144  {
3145  	char *ts_str = target_ts_to_str(cmd->transport_state);
3146  	const u8 *cdb = cmd->t_task_cdb;
3147  	struct se_tmr_req *tmf = cmd->se_tmr_req;
3148  
3149  	if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
3150  		pr_debug("%scmd %#02x:%#02x with tag %#llx dir %s i_state %d t_state %s len %d refcnt %d transport_state %s\n",
3151  			 pfx, cdb[0], cdb[1], cmd->tag,
3152  			 data_dir_name(cmd->data_direction),
3153  			 cmd->se_tfo->get_cmd_state(cmd),
3154  			 cmd_state_name(cmd->t_state), cmd->data_length,
3155  			 kref_read(&cmd->cmd_kref), ts_str);
3156  	} else {
3157  		pr_debug("%stmf %s with tag %#llx ref_task_tag %#llx i_state %d t_state %s refcnt %d transport_state %s\n",
3158  			 pfx, target_tmf_name(tmf->function), cmd->tag,
3159  			 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd),
3160  			 cmd_state_name(cmd->t_state),
3161  			 kref_read(&cmd->cmd_kref), ts_str);
3162  	}
3163  	kfree(ts_str);
3164  }
3165  EXPORT_SYMBOL(target_show_cmd);
3166  
3167  static void target_stop_cmd_counter_confirm(struct percpu_ref *ref)
3168  {
3169  	struct target_cmd_counter *cmd_cnt = container_of(ref,
3170  						struct target_cmd_counter,
3171  						refcnt);
3172  	complete_all(&cmd_cnt->stop_done);
3173  }
3174  
3175  /**
3176   * target_stop_cmd_counter - Stop new IO from being added to the counter.
3177   * @cmd_cnt: counter to stop
3178   */
3179  void target_stop_cmd_counter(struct target_cmd_counter *cmd_cnt)
3180  {
3181  	pr_debug("Stopping command counter.\n");
3182  	if (!atomic_cmpxchg(&cmd_cnt->stopped, 0, 1))
3183  		percpu_ref_kill_and_confirm(&cmd_cnt->refcnt,
3184  					    target_stop_cmd_counter_confirm);
3185  }
3186  EXPORT_SYMBOL_GPL(target_stop_cmd_counter);
3187  
3188  /**
3189   * target_stop_session - Stop new IO from being queued on the session.
3190   * @se_sess: session to stop
3191   */
3192  void target_stop_session(struct se_session *se_sess)
3193  {
3194  	target_stop_cmd_counter(se_sess->cmd_cnt);
3195  }
3196  EXPORT_SYMBOL(target_stop_session);
3197  
3198  /**
3199   * target_wait_for_cmds - Wait for outstanding cmds.
3200   * @cmd_cnt: counter to wait for active I/O for.
3201   */
3202  void target_wait_for_cmds(struct target_cmd_counter *cmd_cnt)
3203  {
3204  	int ret;
3205  
3206  	WARN_ON_ONCE(!atomic_read(&cmd_cnt->stopped));
3207  
3208  	do {
3209  		pr_debug("Waiting for running cmds to complete.\n");
3210  		ret = wait_event_timeout(cmd_cnt->refcnt_wq,
3211  					 percpu_ref_is_zero(&cmd_cnt->refcnt),
3212  					 180 * HZ);
3213  	} while (ret <= 0);
3214  
3215  	wait_for_completion(&cmd_cnt->stop_done);
3216  	pr_debug("Waiting for cmds done.\n");
3217  }
3218  EXPORT_SYMBOL_GPL(target_wait_for_cmds);
3219  
/**
 * target_wait_for_sess_cmds - Wait for outstanding commands
 * @se_sess: session whose outstanding commands to wait for
 */
void target_wait_for_sess_cmds(struct se_session *se_sess)
{
	target_wait_for_cmds(se_sess->cmd_cnt);
}
EXPORT_SYMBOL(target_wait_for_sess_cmds);
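
/*
 * Note: a counter or session must be stopped before it is waited on;
 * target_wait_for_cmds() warns if the counter has not been stopped yet.
 * An illustrative (not prescriptive) fabric shutdown sequence:
 *
 *	target_stop_session(se_sess);
 *	target_wait_for_sess_cmds(se_sess);
 *	transport_deregister_session(se_sess);
 */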

/*
 * Prevent new percpu_ref_tryget_live() calls from succeeding and wait until
 * all references to the LUN have been released. Called during LUN shutdown.
 */
void transport_clear_lun_ref(struct se_lun *lun)
{
	percpu_ref_kill(&lun->lun_ref);
	wait_for_completion(&lun->lun_shutdown_comp);
}

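/*
 * Set CMD_T_STOP and wait for the command to finish executing. Returns false
 * without waiting when there is nothing to wait for, e.g. the command never
 * became active or has already been aborted on the fabric-stop path. Drops
 * and re-acquires cmd->t_state_lock around the wait.
 */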
static bool
__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
			   bool *aborted, bool *tas, unsigned long *flags)
	__releases(&cmd->t_state_lock)
	__acquires(&cmd->t_state_lock)
{
	lockdep_assert_held(&cmd->t_state_lock);

	if (fabric_stop)
		cmd->transport_state |= CMD_T_FABRIC_STOP;

	if (cmd->transport_state & CMD_T_ABORTED)
		*aborted = true;

	if (cmd->transport_state & CMD_T_TAS)
		*tas = true;

	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
	    !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
		return false;

	if (!(cmd->transport_state & CMD_T_ACTIVE))
		return false;

	if (fabric_stop && *aborted)
		return false;

	cmd->transport_state |= CMD_T_STOP;

	target_show_cmd("wait_for_tasks: Stopping ", cmd);

	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);

	while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp,
					    180 * HZ))
		target_show_cmd("wait for tasks: ", cmd);

	spin_lock_irqsave(&cmd->t_state_lock, *flags);
	cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);

	pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
		 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);

	return true;
}

/**
 * transport_wait_for_tasks - set CMD_T_STOP and wait for t_transport_stop_comp
 * @cmd: command to wait on
 */
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
	unsigned long flags;
	bool ret, aborted = false, tas = false;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);

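/*
 * Mapping from LIO sense_reason_t codes to the SCSI sense key, additional
 * sense code (ASC) and additional sense code qualifier (ASCQ) reported to the
 * initiator; consumed by translate_sense_reason() below.
 */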
struct sense_detail {
	u8 key;
	u8 asc;
	u8 ascq;
	bool add_sense_info;
};

static const struct sense_detail sense_detail_table[] = {
	[TCM_NO_SENSE] = {
		.key = NOT_READY
	},
	[TCM_NON_EXISTENT_LUN] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x25 /* LOGICAL UNIT NOT SUPPORTED */
	},
	[TCM_UNSUPPORTED_SCSI_OPCODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_SECTOR_COUNT_TOO_MANY] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x20, /* INVALID COMMAND OPERATION CODE */
	},
	[TCM_UNKNOWN_MODE_PAGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_CHECK_CONDITION_ABORT_CMD] = {
		.key = ABORTED_COMMAND,
		.asc = 0x29, /* BUS DEVICE RESET FUNCTION OCCURRED */
		.ascq = 0x03,
	},
	[TCM_INCORRECT_AMOUNT_OF_DATA] = {
		.key = ABORTED_COMMAND,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0d, /* NOT ENOUGH UNSOLICITED DATA */
	},
	[TCM_INVALID_CDB_FIELD] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x24, /* INVALID FIELD IN CDB */
	},
	[TCM_INVALID_PARAMETER_LIST] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26, /* INVALID FIELD IN PARAMETER LIST */
	},
	[TCM_TOO_MANY_TARGET_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x06, /* TOO MANY TARGET DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_TARGET_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x07, /* UNSUPPORTED TARGET DESCRIPTOR TYPE CODE */
	},
	[TCM_TOO_MANY_SEGMENT_DESCS] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x08, /* TOO MANY SEGMENT DESCRIPTORS */
	},
	[TCM_UNSUPPORTED_SEGMENT_DESC_TYPE_CODE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x26,
		.ascq = 0x09, /* UNSUPPORTED SEGMENT DESCRIPTOR TYPE CODE */
	},
	[TCM_PARAMETER_LIST_LENGTH_ERROR] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x1a, /* PARAMETER LIST LENGTH ERROR */
	},
	[TCM_UNEXPECTED_UNSOLICITED_DATA] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0c, /* WRITE ERROR */
		.ascq = 0x0c, /* UNEXPECTED_UNSOLICITED_DATA */
	},
	[TCM_SERVICE_CRC_ERROR] = {
		.key = ABORTED_COMMAND,
		.asc = 0x47, /* PROTOCOL SERVICE CRC ERROR */
		.ascq = 0x05, /* N/A */
	},
	[TCM_SNACK_REJECTED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x11, /* READ ERROR */
		.ascq = 0x13, /* FAILED RETRANSMISSION REQUEST */
	},
	[TCM_WRITE_PROTECTED] = {
		.key = DATA_PROTECT,
		.asc = 0x27, /* WRITE PROTECTED */
	},
	[TCM_ADDRESS_OUT_OF_RANGE] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x21, /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
	},
	[TCM_CHECK_CONDITION_UNIT_ATTENTION] = {
		.key = UNIT_ATTENTION,
	},
	[TCM_MISCOMPARE_VERIFY] = {
		.key = MISCOMPARE,
		.asc = 0x1d, /* MISCOMPARE DURING VERIFY OPERATION */
		.ascq = 0x00,
		.add_sense_info = true,
	},
	[TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x01, /* LOGICAL BLOCK GUARD CHECK FAILED */
		.add_sense_info = true,
	},
	[TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x02, /* LOGICAL BLOCK APPLICATION TAG CHECK FAILED */
		.add_sense_info = true,
	},
	[TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED] = {
		.key = ABORTED_COMMAND,
		.asc = 0x10,
		.ascq = 0x03, /* LOGICAL BLOCK REFERENCE TAG CHECK FAILED */
		.add_sense_info = true,
	},
	[TCM_COPY_TARGET_DEVICE_NOT_REACHABLE] = {
		.key = COPY_ABORTED,
		.asc = 0x0d,
		.ascq = 0x02, /* COPY TARGET DEVICE NOT REACHABLE */
	},
	[TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE] = {
		/*
		 * Returning ILLEGAL REQUEST would cause immediate IO errors on
		 * Solaris initiators.  Returning NOT READY instead means the
		 * operations will be retried a finite number of times and we
		 * can survive intermittent errors.
		 */
		.key = NOT_READY,
		.asc = 0x08, /* LOGICAL UNIT COMMUNICATION FAILURE */
	},
	[TCM_INSUFFICIENT_REGISTRATION_RESOURCES] = {
		/*
		 * From spc4r22 sections 5.7.7 and 5.7.8:
		 * If a PERSISTENT RESERVE OUT command with a REGISTER service
		 * action, a REGISTER AND IGNORE EXISTING KEY service action or
		 * a REGISTER AND MOVE service action is attempted, but there
		 * are insufficient device server resources to complete the
		 * operation, then the command shall be terminated with CHECK
		 * CONDITION status, with the sense key set to ILLEGAL REQUEST
		 * and the additional sense code set to INSUFFICIENT
		 * REGISTRATION RESOURCES.
		 */
		.key = ILLEGAL_REQUEST,
		.asc = 0x55,
		.ascq = 0x04, /* INSUFFICIENT REGISTRATION RESOURCES */
	},
	[TCM_INVALID_FIELD_IN_COMMAND_IU] = {
		.key = ILLEGAL_REQUEST,
		.asc = 0x0e,
		.ascq = 0x03, /* INVALID FIELD IN COMMAND INFORMATION UNIT */
	},
	[TCM_ALUA_TG_PT_STANDBY] = {
		.key = NOT_READY,
		.asc = 0x04,
		.ascq = ASCQ_04H_ALUA_TG_PT_STANDBY,
	},
	[TCM_ALUA_TG_PT_UNAVAILABLE] = {
		.key = NOT_READY,
		.asc = 0x04,
		.ascq = ASCQ_04H_ALUA_TG_PT_UNAVAILABLE,
	},
	[TCM_ALUA_STATE_TRANSITION] = {
		.key = NOT_READY,
		.asc = 0x04,
		.ascq = ASCQ_04H_ALUA_STATE_TRANSITION,
	},
	[TCM_ALUA_OFFLINE] = {
		.key = NOT_READY,
		.asc = 0x04,
		.ascq = ASCQ_04H_ALUA_OFFLINE,
	},
};

/**
 * translate_sense_reason - translate a sense reason into T10 key, asc and ascq
 * @cmd: SCSI command in which the resulting sense buffer or SCSI status will
 *   be stored.
 * @reason: LIO sense reason code. If this argument has the value
 *   TCM_CHECK_CONDITION_UNIT_ATTENTION, try to dequeue a unit attention. If
 *   dequeuing a unit attention fails due to multiple commands being processed
 *   concurrently, set the command status to BUSY.
 */
static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason)
{
	const struct sense_detail *sd;
	u8 *buffer = cmd->sense_buffer;
	int r = (__force int)reason;
	u8 key, asc, ascq;
	bool desc_format = target_sense_desc_format(cmd->se_dev);

	if (r < ARRAY_SIZE(sense_detail_table) && sense_detail_table[r].key)
		sd = &sense_detail_table[r];
	else
		sd = &sense_detail_table[(__force int)
				       TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE];

	key = sd->key;
	if (reason == TCM_CHECK_CONDITION_UNIT_ATTENTION) {
		if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc,
						       &ascq)) {
			cmd->scsi_status = SAM_STAT_BUSY;
			return;
		}
	} else {
		WARN_ON_ONCE(sd->asc == 0);
		asc = sd->asc;
		ascq = sd->ascq;
	}

	cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE;
	cmd->scsi_status = SAM_STAT_CHECK_CONDITION;
	cmd->scsi_sense_length  = TRANSPORT_SENSE_BUFFER;
	scsi_build_sense_buffer(desc_format, buffer, key, asc, ascq);
	if (sd->add_sense_info)
		WARN_ON_ONCE(scsi_set_sense_information(buffer,
							cmd->scsi_sense_length,
							cmd->sense_info) < 0);
}

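/**
 * transport_send_check_condition_and_sense - queue CHECK CONDITION status
 * @cmd: command to complete with CHECK CONDITION status
 * @reason: LIO sense reason code used to build the sense data
 * @from_transport: if non-zero the sense data has already been set up and
 *   translate_sense_reason() is skipped
 *
 * Only the first CHECK CONDITION per command is sent; later calls for the
 * same command return 0 without queueing anything.
 */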
int
transport_send_check_condition_and_sense(struct se_cmd *cmd,
		sense_reason_t reason, int from_transport)
{
	unsigned long flags;

	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		return 0;
	}
	cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (!from_transport)
		translate_sense_reason(cmd, reason);

	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);

/**
 * target_send_busy - Send SCSI BUSY status back to the initiator
 * @cmd: SCSI command for which to send a BUSY reply.
 *
 * Note: Only call this function if target_submit_cmd*() failed.
 */
int target_send_busy(struct se_cmd *cmd)
{
	WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

	cmd->scsi_status = SAM_STAT_BUSY;
	trace_target_cmd_complete(cmd);
	return cmd->se_tfo->queue_status(cmd);
}
EXPORT_SYMBOL(target_send_busy);

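/*
 * Work item scheduled by transport_generic_handle_tmr(): executes the queued
 * task management function, queues the TMR response back to the fabric and
 * releases the command. Aborted TMRs are handed to target_handle_abort()
 * instead.
 */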
static void target_tmr_work(struct work_struct *work)
{
	struct se_cmd *cmd = container_of(work, struct se_cmd, work);
	struct se_device *dev = cmd->se_dev;
	struct se_tmr_req *tmr = cmd->se_tmr_req;
	int ret;

	if (cmd->transport_state & CMD_T_ABORTED)
		goto aborted;

	switch (tmr->function) {
	case TMR_ABORT_TASK:
		core_tmr_abort_task(dev, tmr, cmd->se_sess);
		break;
	case TMR_ABORT_TASK_SET:
	case TMR_CLEAR_ACA:
	case TMR_CLEAR_TASK_SET:
		tmr->response = TMR_TASK_MGMT_FUNCTION_NOT_SUPPORTED;
		break;
	case TMR_LUN_RESET:
		ret = core_tmr_lun_reset(dev, tmr, NULL, NULL);
		tmr->response = (!ret) ? TMR_FUNCTION_COMPLETE :
					 TMR_FUNCTION_REJECTED;
		if (tmr->response == TMR_FUNCTION_COMPLETE) {
			target_dev_ua_allocate(dev, 0x29,
					       ASCQ_29H_BUS_DEVICE_RESET_FUNCTION_OCCURRED);
		}
		break;
	case TMR_TARGET_WARM_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	case TMR_TARGET_COLD_RESET:
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	default:
		pr_err("Unknown TMR function: 0x%02x.\n",
				tmr->function);
		tmr->response = TMR_FUNCTION_REJECTED;
		break;
	}

	if (cmd->transport_state & CMD_T_ABORTED)
		goto aborted;

	cmd->se_tfo->queue_tm_rsp(cmd);

	transport_lun_remove_cmd(cmd);
	transport_cmd_check_stop_to_fabric(cmd);
	return;

aborted:
	target_handle_abort(cmd);
}

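/**
 * transport_generic_handle_tmr - hand a task management request to the core
 * @cmd: command whose se_tmr_req describes the TMR to execute
 *
 * Adds the TMR to the device's dev_tmr_list and schedules target_tmr_work(),
 * unless the command has already been aborted, in which case the abort is
 * completed immediately.
 */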
int transport_generic_handle_tmr(struct se_cmd *cmd)
{
	unsigned long flags;
	bool aborted = false;

	spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags);
	list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list);
	spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags);

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (cmd->transport_state & CMD_T_ABORTED) {
		aborted = true;
	} else {
		cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
		cmd->transport_state |= CMD_T_ACTIVE;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);

	if (aborted) {
		pr_warn_ratelimited("handle_tmr caught CMD_T_ABORTED TMR %d ref_tag: %llu tag: %llu\n",
				    cmd->se_tmr_req->function,
				    cmd->se_tmr_req->ref_task_tag, cmd->tag);
		target_handle_abort(cmd);
		return 0;
	}

	INIT_WORK(&cmd->work, target_tmr_work);
	schedule_work(&cmd->work);
	return 0;
}
EXPORT_SYMBOL(transport_generic_handle_tmr);

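/*
 * Report whether a volatile write cache is enabled for @dev: ask the backend
 * via its get_write_cache() callback when available, otherwise fall back to
 * the emulate_write_cache device attribute.
 */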
bool target_check_wce(struct se_device *dev)
{
	bool wce = false;

	if (dev->transport->get_write_cache)
		wce = dev->transport->get_write_cache(dev);
	else if (dev->dev_attrib.emulate_write_cache > 0)
		wce = true;

	return wce;
}

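/*
 * FUA writes are only honoured when a write cache is in use and the
 * emulate_fua_write attribute is set.
 */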
bool target_check_fua(struct se_device *dev)
{
	return target_check_wce(dev) && dev->dev_attrib.emulate_fua_write > 0;
}