Lines matching refs: cmd — cross-reference hits for the struct se_cmd *cmd argument/local in the SCSI target core transport code (drivers/target/target_core_transport.c). Each entry gives the source line number, the matching line, and the enclosing function.

54 static void transport_complete_task_attr(struct se_cmd *cmd);
55 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason);
56 static void transport_handle_queue_full(struct se_cmd *cmd,
696 static void target_remove_from_state_list(struct se_cmd *cmd) in target_remove_from_state_list() argument
698 struct se_device *dev = cmd->se_dev; in target_remove_from_state_list()
704 spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); in target_remove_from_state_list()
705 if (cmd->state_active) { in target_remove_from_state_list()
706 list_del(&cmd->state_list); in target_remove_from_state_list()
707 cmd->state_active = false; in target_remove_from_state_list()
709 spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); in target_remove_from_state_list()
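
Read together, lines 696-709 are almost the entire body of target_remove_from_state_list(). A sketch of the full function, with the locals and the NULL check on dev (which the listing omits because those lines do not reference cmd) filled in as assumptions:

static void target_remove_from_state_list(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned long flags;

        if (!dev)               /* assumed guard on the omitted lines */
                return;

        /* Per-CPU device queue chosen when the command was dispatched. */
        spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
        if (cmd->state_active) {
                list_del(&cmd->state_list);
                cmd->state_active = false;
        }
        spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
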
712 static void target_remove_from_tmr_list(struct se_cmd *cmd) in target_remove_from_tmr_list() argument
717 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) in target_remove_from_tmr_list()
718 dev = cmd->se_tmr_req->tmr_dev; in target_remove_from_tmr_list()
722 if (cmd->se_tmr_req->tmr_dev) in target_remove_from_tmr_list()
723 list_del_init(&cmd->se_tmr_req->tmr_list); in target_remove_from_tmr_list()
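
The TMR-list counterpart follows the same pattern. In the sketch below, the dev guard and the se_tmr_lock lock/unlock pair are assumptions; only the cmd-referencing lines appear in the listing:

static void target_remove_from_tmr_list(struct se_cmd *cmd)
{
        struct se_device *dev = NULL;
        unsigned long flags;

        if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                dev = cmd->se_tmr_req->tmr_dev;

        if (dev) {
                /* se_tmr_lock pair assumed; those lines are not in the listing */
                spin_lock_irqsave(&dev->se_tmr_lock, flags);
                if (cmd->se_tmr_req->tmr_dev)
                        list_del_init(&cmd->se_tmr_req->tmr_list);
                spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
        }
}
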
734 static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd) in transport_cmd_check_stop_to_fabric() argument
738 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_cmd_check_stop_to_fabric()
743 if (cmd->transport_state & CMD_T_STOP) { in transport_cmd_check_stop_to_fabric()
745 __func__, __LINE__, cmd->tag); in transport_cmd_check_stop_to_fabric()
747 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_cmd_check_stop_to_fabric()
749 complete_all(&cmd->t_transport_stop_comp); in transport_cmd_check_stop_to_fabric()
752 cmd->transport_state &= ~CMD_T_ACTIVE; in transport_cmd_check_stop_to_fabric()
753 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_cmd_check_stop_to_fabric()
762 return cmd->se_tfo->check_stop_free(cmd); in transport_cmd_check_stop_to_fabric()
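
transport_cmd_check_stop_to_fabric() decides whether a frontend stop is in progress before handing the command back to the fabric. A reconstruction from lines 734-762, with the debug message and comments paraphrased:

static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);

        /* A frontend exception (CMD_T_STOP) wins: wake the waiter and stop. */
        if (cmd->transport_state & CMD_T_STOP) {
                pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
                         __func__, __LINE__, cmd->tag);

                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                complete_all(&cmd->t_transport_stop_comp);
                return 1;
        }

        cmd->transport_state &= ~CMD_T_ACTIVE;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        /*
         * check_stop_free() returns 1 if the fabric released its se_cmd
         * reference here, 0 if the command is still in use.
         */
        return cmd->se_tfo->check_stop_free(cmd);
}
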
765 static void transport_lun_remove_cmd(struct se_cmd *cmd) in transport_lun_remove_cmd() argument
767 struct se_lun *lun = cmd->se_lun; in transport_lun_remove_cmd()
772 target_remove_from_state_list(cmd); in transport_lun_remove_cmd()
773 target_remove_from_tmr_list(cmd); in transport_lun_remove_cmd()
775 if (cmpxchg(&cmd->lun_ref_active, true, false)) in transport_lun_remove_cmd()
781 cmd->se_lun = NULL; in transport_lun_remove_cmd()
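
transport_lun_remove_cmd() is the tear-down path that detaches a command from its LUN. Sketch assembled from lines 765-781; the percpu_ref_put() on lun->lun_ref is assumed, since that line references lun rather than cmd:

static void transport_lun_remove_cmd(struct se_cmd *cmd)
{
        struct se_lun *lun = cmd->se_lun;

        if (!lun)
                return;

        target_remove_from_state_list(cmd);
        target_remove_from_tmr_list(cmd);

        /* Drop the LUN reference exactly once, even if called twice. */
        if (cmpxchg(&cmd->lun_ref_active, true, false))
                percpu_ref_put(&lun->lun_ref);  /* assumed: line not in listing */

        /* Clear se_lun before handing the command back to the fabric. */
        cmd->se_lun = NULL;
}
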
786 struct se_cmd *cmd = container_of(work, struct se_cmd, work); in target_complete_failure_work() local
788 transport_generic_request_failure(cmd, cmd->sense_reason); in target_complete_failure_work()
795 static unsigned char *transport_get_sense_buffer(struct se_cmd *cmd) in transport_get_sense_buffer() argument
797 struct se_device *dev = cmd->se_dev; in transport_get_sense_buffer()
799 WARN_ON(!cmd->se_lun); in transport_get_sense_buffer()
804 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) in transport_get_sense_buffer()
807 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; in transport_get_sense_buffer()
810 dev->se_hba->hba_id, dev->transport->name, cmd->scsi_status); in transport_get_sense_buffer()
811 return cmd->sense_buffer; in transport_get_sense_buffer()
814 void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense) in transport_copy_sense_to_cmd() argument
819 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_copy_sense_to_cmd()
820 cmd_sense_buf = transport_get_sense_buffer(cmd); in transport_copy_sense_to_cmd()
822 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_copy_sense_to_cmd()
826 cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE; in transport_copy_sense_to_cmd()
827 memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length); in transport_copy_sense_to_cmd()
828 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_copy_sense_to_cmd()
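
transport_copy_sense_to_cmd() copies fabric-provided sense data under t_state_lock, using transport_get_sense_buffer() (lines 795-811) to refuse a second copy once SCF_SENT_CHECK_CONDITION is set. A sketch with the NULL-buffer early return filled in as an assumption:

void transport_copy_sense_to_cmd(struct se_cmd *cmd, unsigned char *sense)
{
        unsigned char *cmd_sense_buf;
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        cmd_sense_buf = transport_get_sense_buffer(cmd);
        if (!cmd_sense_buf) {
                /* NULL once a CHECK CONDITION was already sent for this command */
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return;
        }

        cmd->se_cmd_flags |= SCF_TRANSPORT_TASK_SENSE;
        memcpy(cmd_sense_buf, sense, cmd->scsi_sense_length);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}
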
832 static void target_handle_abort(struct se_cmd *cmd) in target_handle_abort() argument
834 bool tas = cmd->transport_state & CMD_T_TAS; in target_handle_abort()
835 bool ack_kref = cmd->se_cmd_flags & SCF_ACK_KREF; in target_handle_abort()
838 pr_debug("tag %#llx: send_abort_response = %d\n", cmd->tag, tas); in target_handle_abort()
841 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { in target_handle_abort()
842 cmd->scsi_status = SAM_STAT_TASK_ABORTED; in target_handle_abort()
844 cmd->t_task_cdb[0], cmd->tag); in target_handle_abort()
845 trace_target_cmd_complete(cmd); in target_handle_abort()
846 ret = cmd->se_tfo->queue_status(cmd); in target_handle_abort()
848 transport_handle_queue_full(cmd, cmd->se_dev, in target_handle_abort()
853 cmd->se_tmr_req->response = TMR_FUNCTION_REJECTED; in target_handle_abort()
854 cmd->se_tfo->queue_tm_rsp(cmd); in target_handle_abort()
861 cmd->se_tfo->aborted_task(cmd); in target_handle_abort()
863 WARN_ON_ONCE(target_put_sess_cmd(cmd) != 0); in target_handle_abort()
871 WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0); in target_handle_abort()
873 transport_lun_remove_cmd(cmd); in target_handle_abort()
875 transport_cmd_check_stop_to_fabric(cmd); in target_handle_abort()
880 struct se_cmd *cmd = container_of(work, struct se_cmd, work); in target_abort_work() local
882 target_handle_abort(cmd); in target_abort_work()
885 static bool target_cmd_interrupted(struct se_cmd *cmd) in target_cmd_interrupted() argument
889 if (cmd->transport_state & CMD_T_ABORTED) { in target_cmd_interrupted()
890 if (cmd->transport_complete_callback) in target_cmd_interrupted()
891 cmd->transport_complete_callback(cmd, false, &post_ret); in target_cmd_interrupted()
892 INIT_WORK(&cmd->work, target_abort_work); in target_cmd_interrupted()
893 queue_work(target_completion_wq, &cmd->work); in target_cmd_interrupted()
895 } else if (cmd->transport_state & CMD_T_STOP) { in target_cmd_interrupted()
896 if (cmd->transport_complete_callback) in target_cmd_interrupted()
897 cmd->transport_complete_callback(cmd, false, &post_ret); in target_cmd_interrupted()
898 complete_all(&cmd->t_transport_stop_comp); in target_cmd_interrupted()
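
target_cmd_interrupted() is the common early-out used by the completion and execution paths. Reconstructed from lines 885-898; the post_ret local and the return values are assumed:

static bool target_cmd_interrupted(struct se_cmd *cmd)
{
        int post_ret;

        if (cmd->transport_state & CMD_T_ABORTED) {
                if (cmd->transport_complete_callback)
                        cmd->transport_complete_callback(cmd, false, &post_ret);
                /* Abort handling is pushed to the completion workqueue. */
                INIT_WORK(&cmd->work, target_abort_work);
                queue_work(target_completion_wq, &cmd->work);
                return true;
        } else if (cmd->transport_state & CMD_T_STOP) {
                if (cmd->transport_complete_callback)
                        cmd->transport_complete_callback(cmd, false, &post_ret);
                /* Wake transport_wait_for_tasks() / session shutdown. */
                complete_all(&cmd->t_transport_stop_comp);
                return true;
        }

        return false;
}
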
906 void target_complete_cmd_with_sense(struct se_cmd *cmd, u8 scsi_status, in target_complete_cmd_with_sense() argument
909 struct se_wwn *wwn = cmd->se_sess->se_tpg->se_tpg_wwn; in target_complete_cmd_with_sense()
913 if (target_cmd_interrupted(cmd)) in target_complete_cmd_with_sense()
916 cmd->scsi_status = scsi_status; in target_complete_cmd_with_sense()
917 cmd->sense_reason = sense_reason; in target_complete_cmd_with_sense()
919 spin_lock_irqsave(&cmd->t_state_lock, flags); in target_complete_cmd_with_sense()
920 switch (cmd->scsi_status) { in target_complete_cmd_with_sense()
922 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) in target_complete_cmd_with_sense()
932 cmd->t_state = TRANSPORT_COMPLETE; in target_complete_cmd_with_sense()
933 cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE); in target_complete_cmd_with_sense()
934 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in target_complete_cmd_with_sense()
936 INIT_WORK(&cmd->work, success ? target_complete_ok_work : in target_complete_cmd_with_sense()
940 cpu = cmd->cpuid; in target_complete_cmd_with_sense()
944 queue_work_on(cpu, target_completion_wq, &cmd->work); in target_complete_cmd_with_sense()
948 void target_complete_cmd(struct se_cmd *cmd, u8 scsi_status) in target_complete_cmd() argument
950 target_complete_cmd_with_sense(cmd, scsi_status, scsi_status ? in target_complete_cmd()
956 void target_set_cmd_data_length(struct se_cmd *cmd, int length) in target_set_cmd_data_length() argument
958 if (length < cmd->data_length) { in target_set_cmd_data_length()
959 if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { in target_set_cmd_data_length()
960 cmd->residual_count += cmd->data_length - length; in target_set_cmd_data_length()
962 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; in target_set_cmd_data_length()
963 cmd->residual_count = cmd->data_length - length; in target_set_cmd_data_length()
966 cmd->data_length = length; in target_set_cmd_data_length()
971 void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length) in target_complete_cmd_with_length() argument
974 cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) { in target_complete_cmd_with_length()
975 target_set_cmd_data_length(cmd, length); in target_complete_cmd_with_length()
978 target_complete_cmd(cmd, scsi_status); in target_complete_cmd_with_length()
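
Lines 956-978 cover the residual-count bookkeeping pair. In the sketch below, the SAM_STAT_GOOD half of the condition in target_complete_cmd_with_length() sits on a line the listing does not show and is therefore an assumption:

void target_set_cmd_data_length(struct se_cmd *cmd, int length)
{
        if (length < cmd->data_length) {
                if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
                        cmd->residual_count += cmd->data_length - length;
                } else {
                        cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
                        cmd->residual_count = cmd->data_length - length;
                }
                cmd->data_length = length;
        }
}

void target_complete_cmd_with_length(struct se_cmd *cmd, u8 scsi_status, int length)
{
        /* Only shrink data_length for GOOD status (or reads treated as normal). */
        if (scsi_status == SAM_STAT_GOOD ||
            cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)
                target_set_cmd_data_length(cmd, length);

        target_complete_cmd(cmd, scsi_status);
}
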
982 static void target_add_to_state_list(struct se_cmd *cmd) in target_add_to_state_list() argument
984 struct se_device *dev = cmd->se_dev; in target_add_to_state_list()
987 spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags); in target_add_to_state_list()
988 if (!cmd->state_active) { in target_add_to_state_list()
989 list_add_tail(&cmd->state_list, in target_add_to_state_list()
990 &dev->queues[cmd->cpuid].state_list); in target_add_to_state_list()
991 cmd->state_active = true; in target_add_to_state_list()
993 spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags); in target_add_to_state_list()
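
target_add_to_state_list() is the mirror image of target_remove_from_state_list() above, queueing the command on the per-CPU device state list it was dispatched on. Sketch with the locals assumed:

static void target_add_to_state_list(struct se_cmd *cmd)
{
        struct se_device *dev = cmd->se_dev;
        unsigned long flags;

        spin_lock_irqsave(&dev->queues[cmd->cpuid].lock, flags);
        if (!cmd->state_active) {
                list_add_tail(&cmd->state_list,
                              &dev->queues[cmd->cpuid].state_list);
                cmd->state_active = true;
        }
        spin_unlock_irqrestore(&dev->queues[cmd->cpuid].lock, flags);
}
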
999 static void transport_write_pending_qf(struct se_cmd *cmd);
1000 static void transport_complete_qf(struct se_cmd *cmd);
1007 struct se_cmd *cmd, *cmd_tmp; in target_qf_do_work() local
1013 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { in target_qf_do_work()
1014 list_del(&cmd->se_qf_node); in target_qf_do_work()
1018 " context: %s\n", cmd->se_tfo->fabric_name, cmd, in target_qf_do_work()
1019 (cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" : in target_qf_do_work()
1020 (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING" in target_qf_do_work()
1023 if (cmd->t_state == TRANSPORT_COMPLETE_QF_WP) in target_qf_do_work()
1024 transport_write_pending_qf(cmd); in target_qf_do_work()
1025 else if (cmd->t_state == TRANSPORT_COMPLETE_QF_OK || in target_qf_do_work()
1026 cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) in target_qf_do_work()
1027 transport_complete_qf(cmd); in target_qf_do_work()
1031 unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) in transport_dump_cmd_direction() argument
1033 switch (cmd->data_direction) { in transport_dump_cmd_direction()
1324 target_check_max_data_sg_nents(struct se_cmd *cmd, struct se_device *dev, in target_check_max_data_sg_nents() argument
1329 if (!cmd->se_tfo->max_data_sg_nents) in target_check_max_data_sg_nents()
1337 mtl = (cmd->se_tfo->max_data_sg_nents * PAGE_SIZE); in target_check_max_data_sg_nents()
1338 if (cmd->data_length > mtl) { in target_check_max_data_sg_nents()
1350 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { in target_check_max_data_sg_nents()
1351 cmd->residual_count = (size - mtl); in target_check_max_data_sg_nents()
1352 } else if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) { in target_check_max_data_sg_nents()
1353 u32 orig_dl = size + cmd->residual_count; in target_check_max_data_sg_nents()
1354 cmd->residual_count = (orig_dl - mtl); in target_check_max_data_sg_nents()
1356 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; in target_check_max_data_sg_nents()
1357 cmd->residual_count = (cmd->data_length - mtl); in target_check_max_data_sg_nents()
1359 cmd->data_length = mtl; in target_check_max_data_sg_nents()
1364 if (cmd->prot_length) { in target_check_max_data_sg_nents()
1366 cmd->prot_length = dev->prot_length * sectors; in target_check_max_data_sg_nents()
1386 target_cmd_size_check(struct se_cmd *cmd, unsigned int size) in target_cmd_size_check() argument
1388 struct se_device *dev = cmd->se_dev; in target_cmd_size_check()
1390 if (cmd->unknown_data_length) { in target_cmd_size_check()
1391 cmd->data_length = size; in target_cmd_size_check()
1392 } else if (size != cmd->data_length) { in target_cmd_size_check()
1395 " 0x%02x\n", cmd->se_tfo->fabric_name, in target_cmd_size_check()
1396 cmd->data_length, size, cmd->t_task_cdb[0]); in target_cmd_size_check()
1403 if (size > cmd->data_length) { in target_cmd_size_check()
1404 cmd->se_cmd_flags |= SCF_OVERFLOW_BIT; in target_cmd_size_check()
1405 cmd->residual_count = (size - cmd->data_length); in target_cmd_size_check()
1407 cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT; in target_cmd_size_check()
1408 cmd->residual_count = (cmd->data_length - size); in target_cmd_size_check()
1413 if (cmd->data_direction == DMA_FROM_DEVICE) { in target_cmd_size_check()
1414 cmd->data_length = size; in target_cmd_size_check()
1418 if (cmd->data_direction == DMA_TO_DEVICE) { in target_cmd_size_check()
1419 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { in target_cmd_size_check()
1430 if (size > cmd->data_length) { in target_cmd_size_check()
1438 return target_check_max_data_sg_nents(cmd, dev, size); in target_cmd_size_check()
1448 void __target_init_cmd(struct se_cmd *cmd, in __target_init_cmd() argument
1455 INIT_LIST_HEAD(&cmd->se_delayed_node); in __target_init_cmd()
1456 INIT_LIST_HEAD(&cmd->se_qf_node); in __target_init_cmd()
1457 INIT_LIST_HEAD(&cmd->state_list); in __target_init_cmd()
1458 init_completion(&cmd->t_transport_stop_comp); in __target_init_cmd()
1459 cmd->free_compl = NULL; in __target_init_cmd()
1460 cmd->abrt_compl = NULL; in __target_init_cmd()
1461 spin_lock_init(&cmd->t_state_lock); in __target_init_cmd()
1462 INIT_WORK(&cmd->work, NULL); in __target_init_cmd()
1463 kref_init(&cmd->cmd_kref); in __target_init_cmd()
1465 cmd->t_task_cdb = &cmd->__t_task_cdb[0]; in __target_init_cmd()
1466 cmd->se_tfo = tfo; in __target_init_cmd()
1467 cmd->se_sess = se_sess; in __target_init_cmd()
1468 cmd->data_length = data_length; in __target_init_cmd()
1469 cmd->data_direction = data_direction; in __target_init_cmd()
1470 cmd->sam_task_attr = task_attr; in __target_init_cmd()
1471 cmd->sense_buffer = sense_buffer; in __target_init_cmd()
1472 cmd->orig_fe_lun = unpacked_lun; in __target_init_cmd()
1473 cmd->cmd_cnt = cmd_cnt; in __target_init_cmd()
1475 if (!(cmd->se_cmd_flags & SCF_USE_CPUID)) in __target_init_cmd()
1476 cmd->cpuid = raw_smp_processor_id(); in __target_init_cmd()
1478 cmd->state_active = false; in __target_init_cmd()
1483 transport_check_alloc_task_attr(struct se_cmd *cmd) in transport_check_alloc_task_attr() argument
1485 struct se_device *dev = cmd->se_dev; in transport_check_alloc_task_attr()
1494 if (cmd->sam_task_attr == TCM_ACA_TAG) { in transport_check_alloc_task_attr()
1504 target_cmd_init_cdb(struct se_cmd *cmd, unsigned char *cdb, gfp_t gfp) in target_cmd_init_cdb() argument
1524 if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) { in target_cmd_init_cdb()
1525 cmd->t_task_cdb = kzalloc(scsi_command_size(cdb), gfp); in target_cmd_init_cdb()
1526 if (!cmd->t_task_cdb) { in target_cmd_init_cdb()
1530 (unsigned long)sizeof(cmd->__t_task_cdb)); in target_cmd_init_cdb()
1538 memcpy(cmd->t_task_cdb, cdb, scsi_command_size(cdb)); in target_cmd_init_cdb()
1540 trace_target_sequencer_start(cmd); in target_cmd_init_cdb()
1548 memcpy(cmd->t_task_cdb, cdb, min(scsi_command_size(cdb), in target_cmd_init_cdb()
1555 target_cmd_parse_cdb(struct se_cmd *cmd) in target_cmd_parse_cdb() argument
1557 struct se_device *dev = cmd->se_dev; in target_cmd_parse_cdb()
1560 ret = dev->transport->parse_cdb(cmd); in target_cmd_parse_cdb()
1563 cmd->se_tfo->fabric_name, in target_cmd_parse_cdb()
1564 cmd->se_sess->se_node_acl->initiatorname, in target_cmd_parse_cdb()
1565 cmd->t_task_cdb[0]); in target_cmd_parse_cdb()
1569 ret = transport_check_alloc_task_attr(cmd); in target_cmd_parse_cdb()
1573 cmd->se_cmd_flags |= SCF_SUPPORTED_SAM_OPCODE; in target_cmd_parse_cdb()
1574 atomic_long_inc(&cmd->se_lun->lun_stats.cmd_pdus); in target_cmd_parse_cdb()
1579 static int __target_submit(struct se_cmd *cmd) in __target_submit() argument
1589 core_alua_check_nonop_delay(cmd); in __target_submit()
1591 if (cmd->t_data_nents != 0) { in __target_submit()
1597 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && in __target_submit()
1598 cmd->data_direction == DMA_FROM_DEVICE) { in __target_submit()
1599 struct scatterlist *sgl = cmd->t_data_sg; in __target_submit()
1612 if (!cmd->se_lun) { in __target_submit()
1626 cmd->t_state = TRANSPORT_NEW_CMD; in __target_submit()
1627 cmd->transport_state |= CMD_T_ACTIVE; in __target_submit()
1634 ret = transport_generic_new_cmd(cmd); in __target_submit()
1636 transport_generic_request_failure(cmd, ret); in __target_submit()
1641 transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl, in transport_generic_map_mem_to_cmd() argument
1652 if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT) { in transport_generic_map_mem_to_cmd()
1658 cmd->t_data_sg = sgl; in transport_generic_map_mem_to_cmd()
1659 cmd->t_data_nents = sgl_count; in transport_generic_map_mem_to_cmd()
1660 cmd->t_bidi_data_sg = sgl_bidi; in transport_generic_map_mem_to_cmd()
1661 cmd->t_bidi_data_nents = sgl_bidi_count; in transport_generic_map_mem_to_cmd()
1663 cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC; in transport_generic_map_mem_to_cmd()
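
transport_generic_map_mem_to_cmd() lets SG-passthrough fabrics hand their own scatterlists to the command instead of having the core allocate pages. Sketch from lines 1641-1663; the empty-SGL early return and the exact sense_reason returned on overflow are assumptions:

static sense_reason_t
transport_generic_map_mem_to_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_count, struct scatterlist *sgl_bidi, u32 sgl_bidi_count)
{
        if (!sgl || !sgl_count)         /* assumed early return */
                return 0;

        /* Overflowed commands cannot reuse the fabric-provided SGLs. */
        if (cmd->se_cmd_flags & SCF_OVERFLOW_BIT)
                return TCM_INVALID_CDB_FIELD;   /* assumed sense_reason */

        cmd->t_data_sg = sgl;
        cmd->t_data_nents = sgl_count;
        cmd->t_bidi_data_sg = sgl_bidi;
        cmd->t_bidi_data_nents = sgl_bidi_count;

        cmd->se_cmd_flags |= SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC;
        return 0;
}
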
2032 void transport_generic_request_failure(struct se_cmd *cmd, in transport_generic_request_failure() argument
2039 target_show_cmd("-----[ ", cmd); in transport_generic_request_failure()
2044 transport_complete_task_attr(cmd); in transport_generic_request_failure()
2046 if (cmd->transport_complete_callback) in transport_generic_request_failure()
2047 cmd->transport_complete_callback(cmd, false, &post_ret); in transport_generic_request_failure()
2049 if (cmd->transport_state & CMD_T_ABORTED) { in transport_generic_request_failure()
2050 INIT_WORK(&cmd->work, target_abort_work); in transport_generic_request_failure()
2051 queue_work(target_completion_wq, &cmd->work); in transport_generic_request_failure()
2082 cmd->scsi_status = SAM_STAT_TASK_SET_FULL; in transport_generic_request_failure()
2085 cmd->scsi_status = SAM_STAT_BUSY; in transport_generic_request_failure()
2094 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; in transport_generic_request_failure()
2102 if (cmd->se_sess && in transport_generic_request_failure()
2103 cmd->se_dev->dev_attrib.emulate_ua_intlck_ctrl in transport_generic_request_failure()
2105 target_ua_allocate_lun(cmd->se_sess->se_node_acl, in transport_generic_request_failure()
2106 cmd->orig_fe_lun, 0x2C, in transport_generic_request_failure()
2113 cmd->t_task_cdb[0], sense_reason); in transport_generic_request_failure()
2118 ret = transport_send_check_condition_and_sense(cmd, sense_reason, 0); in transport_generic_request_failure()
2123 transport_lun_remove_cmd(cmd); in transport_generic_request_failure()
2124 transport_cmd_check_stop_to_fabric(cmd); in transport_generic_request_failure()
2128 trace_target_cmd_complete(cmd); in transport_generic_request_failure()
2129 ret = cmd->se_tfo->queue_status(cmd); in transport_generic_request_failure()
2133 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); in transport_generic_request_failure()
2137 void __target_execute_cmd(struct se_cmd *cmd, bool do_checks) in __target_execute_cmd() argument
2141 if (!cmd->execute_cmd) { in __target_execute_cmd()
2152 ret = target_scsi3_ua_check(cmd); in __target_execute_cmd()
2156 ret = target_alua_state_check(cmd); in __target_execute_cmd()
2160 ret = target_check_reservation(cmd); in __target_execute_cmd()
2162 cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; in __target_execute_cmd()
2167 ret = cmd->execute_cmd(cmd); in __target_execute_cmd()
2171 spin_lock_irq(&cmd->t_state_lock); in __target_execute_cmd()
2172 cmd->transport_state &= ~CMD_T_SENT; in __target_execute_cmd()
2173 spin_unlock_irq(&cmd->t_state_lock); in __target_execute_cmd()
2175 transport_generic_request_failure(cmd, ret); in __target_execute_cmd()
2178 static int target_write_prot_action(struct se_cmd *cmd) in target_write_prot_action() argument
2186 switch (cmd->prot_op) { in target_write_prot_action()
2188 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_INSERT)) in target_write_prot_action()
2189 sbc_dif_generate(cmd); in target_write_prot_action()
2192 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DOUT_STRIP) in target_write_prot_action()
2195 sectors = cmd->data_length >> ilog2(cmd->se_dev->dev_attrib.block_size); in target_write_prot_action()
2196 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, in target_write_prot_action()
2197 sectors, 0, cmd->t_prot_sg, 0); in target_write_prot_action()
2198 if (unlikely(cmd->pi_err)) { in target_write_prot_action()
2199 spin_lock_irq(&cmd->t_state_lock); in target_write_prot_action()
2200 cmd->transport_state &= ~CMD_T_SENT; in target_write_prot_action()
2201 spin_unlock_irq(&cmd->t_state_lock); in target_write_prot_action()
2202 transport_generic_request_failure(cmd, cmd->pi_err); in target_write_prot_action()
2213 static bool target_handle_task_attr(struct se_cmd *cmd) in target_handle_task_attr() argument
2215 struct se_device *dev = cmd->se_dev; in target_handle_task_attr()
2220 cmd->se_cmd_flags |= SCF_TASK_ATTR_SET; in target_handle_task_attr()
2226 switch (cmd->sam_task_attr) { in target_handle_task_attr()
2230 cmd->t_task_cdb[0]); in target_handle_task_attr()
2236 cmd->t_task_cdb[0]); in target_handle_task_attr()
2249 if (cmd->sam_task_attr != TCM_ORDERED_TAG) { in target_handle_task_attr()
2258 spin_lock_irq(&cmd->t_state_lock); in target_handle_task_attr()
2259 cmd->transport_state &= ~CMD_T_SENT; in target_handle_task_attr()
2260 spin_unlock_irq(&cmd->t_state_lock); in target_handle_task_attr()
2263 list_add_tail(&cmd->se_delayed_node, &dev->delayed_cmd_list); in target_handle_task_attr()
2267 cmd->t_task_cdb[0], cmd->sam_task_attr); in target_handle_task_attr()
2277 void target_execute_cmd(struct se_cmd *cmd) in target_execute_cmd() argument
2285 if (target_cmd_interrupted(cmd)) in target_execute_cmd()
2288 spin_lock_irq(&cmd->t_state_lock); in target_execute_cmd()
2289 cmd->t_state = TRANSPORT_PROCESSING; in target_execute_cmd()
2290 cmd->transport_state |= CMD_T_ACTIVE | CMD_T_SENT; in target_execute_cmd()
2291 spin_unlock_irq(&cmd->t_state_lock); in target_execute_cmd()
2293 if (target_write_prot_action(cmd)) in target_execute_cmd()
2296 if (target_handle_task_attr(cmd)) in target_execute_cmd()
2299 __target_execute_cmd(cmd, true); in target_execute_cmd()
2314 struct se_cmd *cmd; in target_do_delayed_work() local
2319 cmd = list_entry(dev->delayed_cmd_list.next, in target_do_delayed_work()
2322 if (cmd->sam_task_attr == TCM_ORDERED_TAG) { in target_do_delayed_work()
2335 list_del(&cmd->se_delayed_node); in target_do_delayed_work()
2339 if (cmd->sam_task_attr != TCM_ORDERED_TAG) in target_do_delayed_work()
2342 cmd->transport_state |= CMD_T_SENT; in target_do_delayed_work()
2344 __target_execute_cmd(cmd, true); in target_do_delayed_work()
2355 static void transport_complete_task_attr(struct se_cmd *cmd) in transport_complete_task_attr() argument
2357 struct se_device *dev = cmd->se_dev; in transport_complete_task_attr()
2362 if (!(cmd->se_cmd_flags & SCF_TASK_ATTR_SET)) in transport_complete_task_attr()
2365 if (cmd->sam_task_attr == TCM_SIMPLE_TAG) { in transport_complete_task_attr()
2368 } else if (cmd->sam_task_attr == TCM_HEAD_TAG) { in transport_complete_task_attr()
2373 } else if (cmd->sam_task_attr == TCM_ORDERED_TAG) { in transport_complete_task_attr()
2382 cmd->se_cmd_flags &= ~SCF_TASK_ATTR_SET; in transport_complete_task_attr()
2389 static void transport_complete_qf(struct se_cmd *cmd) in transport_complete_qf() argument
2393 transport_complete_task_attr(cmd); in transport_complete_qf()
2403 if (cmd->t_state == TRANSPORT_COMPLETE_QF_ERR) { in transport_complete_qf()
2404 if (cmd->scsi_status) in transport_complete_qf()
2407 translate_sense_reason(cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE); in transport_complete_qf()
2420 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && in transport_complete_qf()
2421 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) in transport_complete_qf()
2424 switch (cmd->data_direction) { in transport_complete_qf()
2427 if (cmd->scsi_status && in transport_complete_qf()
2428 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) in transport_complete_qf()
2431 trace_target_cmd_complete(cmd); in transport_complete_qf()
2432 ret = cmd->se_tfo->queue_data_in(cmd); in transport_complete_qf()
2435 if (cmd->se_cmd_flags & SCF_BIDI) { in transport_complete_qf()
2436 ret = cmd->se_tfo->queue_data_in(cmd); in transport_complete_qf()
2442 trace_target_cmd_complete(cmd); in transport_complete_qf()
2443 ret = cmd->se_tfo->queue_status(cmd); in transport_complete_qf()
2450 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); in transport_complete_qf()
2453 transport_lun_remove_cmd(cmd); in transport_complete_qf()
2454 transport_cmd_check_stop_to_fabric(cmd); in transport_complete_qf()
2457 static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev, in transport_handle_queue_full() argument
2469 cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP : in transport_handle_queue_full()
2473 cmd->t_state = TRANSPORT_COMPLETE_QF_ERR; in transport_handle_queue_full()
2477 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); in transport_handle_queue_full()
2479 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); in transport_handle_queue_full()
2481 schedule_work(&cmd->se_dev->qf_work_queue); in transport_handle_queue_full()
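
transport_handle_queue_full() parks a command on the device's queue-full list so target_qf_do_work() (lines 1007-1027) can retry the fabric callback later. Sketch from lines 2457-2481; the -EAGAIN/-ENOMEM test, the qf_cmd_lock pair, and the dev_qf_count increment are assumptions:

static void transport_handle_queue_full(struct se_cmd *cmd, struct se_device *dev,
                                        int err, bool write_pending)
{
        /* Retryable fabric errors are assumed to be -EAGAIN / -ENOMEM. */
        if (err == -EAGAIN || err == -ENOMEM) {
                cmd->t_state = (write_pending) ? TRANSPORT_COMPLETE_QF_WP :
                                                 TRANSPORT_COMPLETE_QF_OK;
        } else {
                cmd->t_state = TRANSPORT_COMPLETE_QF_ERR;
        }

        spin_lock_irq(&cmd->se_dev->qf_cmd_lock);       /* lock pair assumed */
        list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
        atomic_inc_mb(&dev->dev_qf_count);              /* assumed counter bump */
        spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);

        schedule_work(&cmd->se_dev->qf_work_queue);     /* runs target_qf_do_work() */
}
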
2484 static bool target_read_prot_action(struct se_cmd *cmd) in target_read_prot_action() argument
2486 switch (cmd->prot_op) { in target_read_prot_action()
2488 if (!(cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_STRIP)) { in target_read_prot_action()
2489 u32 sectors = cmd->data_length >> in target_read_prot_action()
2490 ilog2(cmd->se_dev->dev_attrib.block_size); in target_read_prot_action()
2492 cmd->pi_err = sbc_dif_verify(cmd, cmd->t_task_lba, in target_read_prot_action()
2493 sectors, 0, cmd->t_prot_sg, in target_read_prot_action()
2495 if (cmd->pi_err) in target_read_prot_action()
2500 if (cmd->se_sess->sup_prot_ops & TARGET_PROT_DIN_INSERT) in target_read_prot_action()
2503 sbc_dif_generate(cmd); in target_read_prot_action()
2514 struct se_cmd *cmd = container_of(work, struct se_cmd, work); in target_complete_ok_work() local
2522 transport_complete_task_attr(cmd); in target_complete_ok_work()
2528 if (atomic_read(&cmd->se_dev->dev_qf_count) != 0) in target_complete_ok_work()
2529 schedule_work(&cmd->se_dev->qf_work_queue); in target_complete_ok_work()
2540 if (!(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL) && in target_complete_ok_work()
2541 cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { in target_complete_ok_work()
2542 WARN_ON(!cmd->scsi_status); in target_complete_ok_work()
2544 cmd, 0, 1); in target_complete_ok_work()
2548 transport_lun_remove_cmd(cmd); in target_complete_ok_work()
2549 transport_cmd_check_stop_to_fabric(cmd); in target_complete_ok_work()
2556 if (cmd->transport_complete_callback) { in target_complete_ok_work()
2558 bool caw = (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE); in target_complete_ok_work()
2559 bool zero_dl = !(cmd->data_length); in target_complete_ok_work()
2562 rc = cmd->transport_complete_callback(cmd, true, &post_ret); in target_complete_ok_work()
2569 ret = transport_send_check_condition_and_sense(cmd, in target_complete_ok_work()
2574 transport_lun_remove_cmd(cmd); in target_complete_ok_work()
2575 transport_cmd_check_stop_to_fabric(cmd); in target_complete_ok_work()
2581 switch (cmd->data_direction) { in target_complete_ok_work()
2593 if (cmd->scsi_status && in target_complete_ok_work()
2594 !(cmd->se_cmd_flags & SCF_TREAT_READ_AS_NORMAL)) in target_complete_ok_work()
2597 atomic_long_add(cmd->data_length, in target_complete_ok_work()
2598 &cmd->se_lun->lun_stats.tx_data_octets); in target_complete_ok_work()
2604 if (target_read_prot_action(cmd)) { in target_complete_ok_work()
2605 ret = transport_send_check_condition_and_sense(cmd, in target_complete_ok_work()
2606 cmd->pi_err, 0); in target_complete_ok_work()
2610 transport_lun_remove_cmd(cmd); in target_complete_ok_work()
2611 transport_cmd_check_stop_to_fabric(cmd); in target_complete_ok_work()
2615 trace_target_cmd_complete(cmd); in target_complete_ok_work()
2616 ret = cmd->se_tfo->queue_data_in(cmd); in target_complete_ok_work()
2621 atomic_long_add(cmd->data_length, in target_complete_ok_work()
2622 &cmd->se_lun->lun_stats.rx_data_octets); in target_complete_ok_work()
2626 if (cmd->se_cmd_flags & SCF_BIDI) { in target_complete_ok_work()
2627 atomic_long_add(cmd->data_length, in target_complete_ok_work()
2628 &cmd->se_lun->lun_stats.tx_data_octets); in target_complete_ok_work()
2629 ret = cmd->se_tfo->queue_data_in(cmd); in target_complete_ok_work()
2637 trace_target_cmd_complete(cmd); in target_complete_ok_work()
2638 ret = cmd->se_tfo->queue_status(cmd); in target_complete_ok_work()
2646 transport_lun_remove_cmd(cmd); in target_complete_ok_work()
2647 transport_cmd_check_stop_to_fabric(cmd); in target_complete_ok_work()
2652 " data_direction: %d\n", cmd, cmd->data_direction); in target_complete_ok_work()
2654 transport_handle_queue_full(cmd, cmd->se_dev, ret, false); in target_complete_ok_work()
2663 static inline void transport_reset_sgl_orig(struct se_cmd *cmd) in transport_reset_sgl_orig() argument
2669 if (!cmd->t_data_sg_orig) in transport_reset_sgl_orig()
2672 kfree(cmd->t_data_sg); in transport_reset_sgl_orig()
2673 cmd->t_data_sg = cmd->t_data_sg_orig; in transport_reset_sgl_orig()
2674 cmd->t_data_sg_orig = NULL; in transport_reset_sgl_orig()
2675 cmd->t_data_nents = cmd->t_data_nents_orig; in transport_reset_sgl_orig()
2676 cmd->t_data_nents_orig = 0; in transport_reset_sgl_orig()
2679 static inline void transport_free_pages(struct se_cmd *cmd) in transport_free_pages() argument
2681 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { in transport_free_pages()
2682 target_free_sgl(cmd->t_prot_sg, cmd->t_prot_nents); in transport_free_pages()
2683 cmd->t_prot_sg = NULL; in transport_free_pages()
2684 cmd->t_prot_nents = 0; in transport_free_pages()
2687 if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) { in transport_free_pages()
2692 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) { in transport_free_pages()
2693 target_free_sgl(cmd->t_bidi_data_sg, in transport_free_pages()
2694 cmd->t_bidi_data_nents); in transport_free_pages()
2695 cmd->t_bidi_data_sg = NULL; in transport_free_pages()
2696 cmd->t_bidi_data_nents = 0; in transport_free_pages()
2698 transport_reset_sgl_orig(cmd); in transport_free_pages()
2701 transport_reset_sgl_orig(cmd); in transport_free_pages()
2703 target_free_sgl(cmd->t_data_sg, cmd->t_data_nents); in transport_free_pages()
2704 cmd->t_data_sg = NULL; in transport_free_pages()
2705 cmd->t_data_nents = 0; in transport_free_pages()
2707 target_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents); in transport_free_pages()
2708 cmd->t_bidi_data_sg = NULL; in transport_free_pages()
2709 cmd->t_bidi_data_nents = 0; in transport_free_pages()
2712 void *transport_kmap_data_sg(struct se_cmd *cmd) in transport_kmap_data_sg() argument
2714 struct scatterlist *sg = cmd->t_data_sg; in transport_kmap_data_sg()
2723 if (!cmd->t_data_nents) in transport_kmap_data_sg()
2727 if (cmd->t_data_nents == 1) in transport_kmap_data_sg()
2731 pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL); in transport_kmap_data_sg()
2736 for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { in transport_kmap_data_sg()
2740 cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); in transport_kmap_data_sg()
2742 if (!cmd->t_data_vmap) in transport_kmap_data_sg()
2745 return cmd->t_data_vmap + cmd->t_data_sg[0].offset; in transport_kmap_data_sg()
2749 void transport_kunmap_data_sg(struct se_cmd *cmd) in transport_kunmap_data_sg() argument
2751 if (!cmd->t_data_nents) { in transport_kunmap_data_sg()
2753 } else if (cmd->t_data_nents == 1) { in transport_kunmap_data_sg()
2754 kunmap(sg_page(cmd->t_data_sg)); in transport_kunmap_data_sg()
2758 vunmap(cmd->t_data_vmap); in transport_kunmap_data_sg()
2759 cmd->t_data_vmap = NULL; in transport_kunmap_data_sg()
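
transport_kmap_data_sg()/transport_kunmap_data_sg() give callers a linear mapping of the command's data scatterlist: kmap() for a single entry, vmap() otherwise. Sketch from lines 2712-2759, with the pages[] fill and its kfree() assumed:

void *transport_kmap_data_sg(struct se_cmd *cmd)
{
        struct scatterlist *sg = cmd->t_data_sg;
        struct page **pages;
        int i;

        if (!cmd->t_data_nents)
                return NULL;

        /* Single entry: a plain kmap() of that page is enough. */
        if (cmd->t_data_nents == 1)
                return kmap(sg_page(sg)) + sg->offset;

        /* Multiple entries: collect the pages and vmap() them contiguously. */
        pages = kmalloc_array(cmd->t_data_nents, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return NULL;

        for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i)
                pages[i] = sg_page(sg);

        cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL);
        kfree(pages);
        if (!cmd->t_data_vmap)
                return NULL;

        return cmd->t_data_vmap + cmd->t_data_sg[0].offset;
}

void transport_kunmap_data_sg(struct se_cmd *cmd)
{
        if (!cmd->t_data_nents) {
                return;
        } else if (cmd->t_data_nents == 1) {
                kunmap(sg_page(cmd->t_data_sg));
                return;
        }

        vunmap(cmd->t_data_vmap);
        cmd->t_data_vmap = NULL;
}
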
2780 transport_generic_new_cmd(struct se_cmd *cmd) in transport_generic_new_cmd() argument
2784 bool zero_flag = !(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB); in transport_generic_new_cmd()
2786 if (cmd->prot_op != TARGET_PROT_NORMAL && in transport_generic_new_cmd()
2787 !(cmd->se_cmd_flags & SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC)) { in transport_generic_new_cmd()
2788 ret = target_alloc_sgl(&cmd->t_prot_sg, &cmd->t_prot_nents, in transport_generic_new_cmd()
2789 cmd->prot_length, true, false); in transport_generic_new_cmd()
2799 if (!(cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC) && in transport_generic_new_cmd()
2800 cmd->data_length) { in transport_generic_new_cmd()
2802 if ((cmd->se_cmd_flags & SCF_BIDI) || in transport_generic_new_cmd()
2803 (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE)) { in transport_generic_new_cmd()
2806 if (cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) in transport_generic_new_cmd()
2807 bidi_length = cmd->t_task_nolb * in transport_generic_new_cmd()
2808 cmd->se_dev->dev_attrib.block_size; in transport_generic_new_cmd()
2810 bidi_length = cmd->data_length; in transport_generic_new_cmd()
2812 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, in transport_generic_new_cmd()
2813 &cmd->t_bidi_data_nents, in transport_generic_new_cmd()
2819 ret = target_alloc_sgl(&cmd->t_data_sg, &cmd->t_data_nents, in transport_generic_new_cmd()
2820 cmd->data_length, zero_flag, false); in transport_generic_new_cmd()
2823 } else if ((cmd->se_cmd_flags & SCF_COMPARE_AND_WRITE) && in transport_generic_new_cmd()
2824 cmd->data_length) { in transport_generic_new_cmd()
2829 u32 caw_length = cmd->t_task_nolb * in transport_generic_new_cmd()
2830 cmd->se_dev->dev_attrib.block_size; in transport_generic_new_cmd()
2832 ret = target_alloc_sgl(&cmd->t_bidi_data_sg, in transport_generic_new_cmd()
2833 &cmd->t_bidi_data_nents, in transport_generic_new_cmd()
2843 target_add_to_state_list(cmd); in transport_generic_new_cmd()
2844 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) { in transport_generic_new_cmd()
2845 target_execute_cmd(cmd); in transport_generic_new_cmd()
2849 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_generic_new_cmd()
2850 cmd->t_state = TRANSPORT_WRITE_PENDING; in transport_generic_new_cmd()
2855 if (cmd->transport_state & CMD_T_STOP && in transport_generic_new_cmd()
2856 !cmd->se_tfo->write_pending_must_be_called) { in transport_generic_new_cmd()
2858 __func__, __LINE__, cmd->tag); in transport_generic_new_cmd()
2860 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_generic_new_cmd()
2862 complete_all(&cmd->t_transport_stop_comp); in transport_generic_new_cmd()
2865 cmd->transport_state &= ~CMD_T_ACTIVE; in transport_generic_new_cmd()
2866 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_generic_new_cmd()
2868 ret = cmd->se_tfo->write_pending(cmd); in transport_generic_new_cmd()
2875 pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd); in transport_generic_new_cmd()
2876 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); in transport_generic_new_cmd()
2881 static void transport_write_pending_qf(struct se_cmd *cmd) in transport_write_pending_qf() argument
2887 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_write_pending_qf()
2888 stop = (cmd->transport_state & (CMD_T_STOP | CMD_T_ABORTED)); in transport_write_pending_qf()
2889 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_write_pending_qf()
2893 __func__, __LINE__, cmd->tag); in transport_write_pending_qf()
2894 complete_all(&cmd->t_transport_stop_comp); in transport_write_pending_qf()
2898 ret = cmd->se_tfo->write_pending(cmd); in transport_write_pending_qf()
2901 cmd); in transport_write_pending_qf()
2902 transport_handle_queue_full(cmd, cmd->se_dev, ret, true); in transport_write_pending_qf()
2910 static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas) in target_wait_free_cmd() argument
2914 spin_lock_irqsave(&cmd->t_state_lock, flags); in target_wait_free_cmd()
2915 __transport_wait_for_tasks(cmd, true, aborted, tas, &flags); in target_wait_free_cmd()
2916 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in target_wait_free_cmd()
2923 void target_put_cmd_and_wait(struct se_cmd *cmd) in target_put_cmd_and_wait() argument
2927 WARN_ON_ONCE(cmd->abrt_compl); in target_put_cmd_and_wait()
2928 cmd->abrt_compl = &compl; in target_put_cmd_and_wait()
2929 target_put_sess_cmd(cmd); in target_put_cmd_and_wait()
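
target_put_cmd_and_wait() drops a reference and blocks until the final kref put fires abrt_compl; target_wait_free_cmd() is its locking wrapper around __transport_wait_for_tasks(). Sketch from lines 2910-2929; the on-stack completion and the wait_for_completion() call are assumptions:

static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
        unsigned long flags;

        spin_lock_irqsave(&cmd->t_state_lock, flags);
        __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}

void target_put_cmd_and_wait(struct se_cmd *cmd)
{
        DECLARE_COMPLETION_ONSTACK(compl);

        WARN_ON_ONCE(cmd->abrt_compl);
        cmd->abrt_compl = &compl;
        target_put_sess_cmd(cmd);       /* final kref put completes abrt_compl */
        wait_for_completion(&compl);    /* assumed: not shown in the listing */
}
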
2956 int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks) in transport_generic_free_cmd() argument
2963 target_wait_free_cmd(cmd, &aborted, &tas); in transport_generic_free_cmd()
2965 if (cmd->se_cmd_flags & SCF_SE_LUN_CMD) { in transport_generic_free_cmd()
2971 if (cmd->state_active) in transport_generic_free_cmd()
2972 target_remove_from_state_list(cmd); in transport_generic_free_cmd()
2974 if (cmd->se_lun) in transport_generic_free_cmd()
2975 transport_lun_remove_cmd(cmd); in transport_generic_free_cmd()
2978 cmd->free_compl = &compl; in transport_generic_free_cmd()
2979 ret = target_put_sess_cmd(cmd); in transport_generic_free_cmd()
2981 pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag); in transport_generic_free_cmd()
3023 static void target_free_cmd_mem(struct se_cmd *cmd) in target_free_cmd_mem() argument
3025 transport_free_pages(cmd); in target_free_cmd_mem()
3027 if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) in target_free_cmd_mem()
3028 core_tmr_release_req(cmd->se_tmr_req); in target_free_cmd_mem()
3029 if (cmd->t_task_cdb != cmd->__t_task_cdb) in target_free_cmd_mem()
3030 kfree(cmd->t_task_cdb); in target_free_cmd_mem()
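
target_free_cmd_mem() releases everything a command may have allocated privately; it is small enough that lines 3023-3030 show essentially all of it, so the sketch below only adds comments:

static void target_free_cmd_mem(struct se_cmd *cmd)
{
        transport_free_pages(cmd);

        if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
                core_tmr_release_req(cmd->se_tmr_req);
        /* Only free the CDB if target_cmd_init_cdb() kzalloc'd a long one. */
        if (cmd->t_task_cdb != cmd->__t_task_cdb)
                kfree(cmd->t_task_cdb);
}
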
3143 void target_show_cmd(const char *pfx, struct se_cmd *cmd) in target_show_cmd() argument
3145 char *ts_str = target_ts_to_str(cmd->transport_state); in target_show_cmd()
3146 const u8 *cdb = cmd->t_task_cdb; in target_show_cmd()
3147 struct se_tmr_req *tmf = cmd->se_tmr_req; in target_show_cmd()
3149 if (!(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) { in target_show_cmd()
3151 pfx, cdb[0], cdb[1], cmd->tag, in target_show_cmd()
3152 data_dir_name(cmd->data_direction), in target_show_cmd()
3153 cmd->se_tfo->get_cmd_state(cmd), in target_show_cmd()
3154 cmd_state_name(cmd->t_state), cmd->data_length, in target_show_cmd()
3155 kref_read(&cmd->cmd_kref), ts_str); in target_show_cmd()
3158 pfx, target_tmf_name(tmf->function), cmd->tag, in target_show_cmd()
3159 tmf->ref_task_tag, cmd->se_tfo->get_cmd_state(cmd), in target_show_cmd()
3160 cmd_state_name(cmd->t_state), in target_show_cmd()
3161 kref_read(&cmd->cmd_kref), ts_str); in target_show_cmd()
3241 __transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop, in __transport_wait_for_tasks() argument
3243 __releases(&cmd->t_state_lock) in __transport_wait_for_tasks()
3244 __acquires(&cmd->t_state_lock) in __transport_wait_for_tasks()
3246 lockdep_assert_held(&cmd->t_state_lock); in __transport_wait_for_tasks()
3249 cmd->transport_state |= CMD_T_FABRIC_STOP; in __transport_wait_for_tasks()
3251 if (cmd->transport_state & CMD_T_ABORTED) in __transport_wait_for_tasks()
3254 if (cmd->transport_state & CMD_T_TAS) in __transport_wait_for_tasks()
3257 if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && in __transport_wait_for_tasks()
3258 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) in __transport_wait_for_tasks()
3261 if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && in __transport_wait_for_tasks()
3262 !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) in __transport_wait_for_tasks()
3265 if (!(cmd->transport_state & CMD_T_ACTIVE)) in __transport_wait_for_tasks()
3271 cmd->transport_state |= CMD_T_STOP; in __transport_wait_for_tasks()
3273 target_show_cmd("wait_for_tasks: Stopping ", cmd); in __transport_wait_for_tasks()
3275 spin_unlock_irqrestore(&cmd->t_state_lock, *flags); in __transport_wait_for_tasks()
3277 while (!wait_for_completion_timeout(&cmd->t_transport_stop_comp, in __transport_wait_for_tasks()
3279 target_show_cmd("wait for tasks: ", cmd); in __transport_wait_for_tasks()
3281 spin_lock_irqsave(&cmd->t_state_lock, *flags); in __transport_wait_for_tasks()
3282 cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP); in __transport_wait_for_tasks()
3285 "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag); in __transport_wait_for_tasks()
3294 bool transport_wait_for_tasks(struct se_cmd *cmd) in transport_wait_for_tasks() argument
3299 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_wait_for_tasks()
3300 ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags); in transport_wait_for_tasks()
3301 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_wait_for_tasks()
3495 static void translate_sense_reason(struct se_cmd *cmd, sense_reason_t reason) in translate_sense_reason() argument
3498 u8 *buffer = cmd->sense_buffer; in translate_sense_reason()
3501 bool desc_format = target_sense_desc_format(cmd->se_dev); in translate_sense_reason()
3511 if (!core_scsi3_ua_for_check_condition(cmd, &key, &asc, in translate_sense_reason()
3513 cmd->scsi_status = SAM_STAT_BUSY; in translate_sense_reason()
3522 cmd->se_cmd_flags |= SCF_EMULATED_TASK_SENSE; in translate_sense_reason()
3523 cmd->scsi_status = SAM_STAT_CHECK_CONDITION; in translate_sense_reason()
3524 cmd->scsi_sense_length = TRANSPORT_SENSE_BUFFER; in translate_sense_reason()
3528 cmd->scsi_sense_length, in translate_sense_reason()
3529 cmd->sense_info) < 0); in translate_sense_reason()
3533 transport_send_check_condition_and_sense(struct se_cmd *cmd, in transport_send_check_condition_and_sense() argument
3538 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); in transport_send_check_condition_and_sense()
3540 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_send_check_condition_and_sense()
3541 if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { in transport_send_check_condition_and_sense()
3542 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_send_check_condition_and_sense()
3545 cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION; in transport_send_check_condition_and_sense()
3546 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_send_check_condition_and_sense()
3549 translate_sense_reason(cmd, reason); in transport_send_check_condition_and_sense()
3551 trace_target_cmd_complete(cmd); in transport_send_check_condition_and_sense()
3552 return cmd->se_tfo->queue_status(cmd); in transport_send_check_condition_and_sense()
3562 int target_send_busy(struct se_cmd *cmd) in target_send_busy() argument
3564 WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB); in target_send_busy()
3566 cmd->scsi_status = SAM_STAT_BUSY; in target_send_busy()
3567 trace_target_cmd_complete(cmd); in target_send_busy()
3568 return cmd->se_tfo->queue_status(cmd); in target_send_busy()
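
The CHECK CONDITION and BUSY status paths close the loop back to the fabric via queue_status(). Sketch from lines 3533-3568; the !from_transport guard in front of translate_sense_reason() is an assumption, as that line does not reference cmd:

int transport_send_check_condition_and_sense(struct se_cmd *cmd,
                sense_reason_t reason, int from_transport)
{
        unsigned long flags;

        WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

        /* Send CHECK CONDITION at most once per command. */
        spin_lock_irqsave(&cmd->t_state_lock, flags);
        if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {
                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                return 0;
        }
        cmd->se_cmd_flags |= SCF_SENT_CHECK_CONDITION;
        spin_unlock_irqrestore(&cmd->t_state_lock, flags);

        if (!from_transport)            /* guard assumed from the call at 3549 */
                translate_sense_reason(cmd, reason);

        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);
}

int target_send_busy(struct se_cmd *cmd)
{
        WARN_ON_ONCE(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB);

        cmd->scsi_status = SAM_STAT_BUSY;
        trace_target_cmd_complete(cmd);
        return cmd->se_tfo->queue_status(cmd);
}
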
3574 struct se_cmd *cmd = container_of(work, struct se_cmd, work); in target_tmr_work() local
3575 struct se_device *dev = cmd->se_dev; in target_tmr_work()
3576 struct se_tmr_req *tmr = cmd->se_tmr_req; in target_tmr_work()
3579 if (cmd->transport_state & CMD_T_ABORTED) in target_tmr_work()
3584 core_tmr_abort_task(dev, tmr, cmd->se_sess); in target_tmr_work()
3613 if (cmd->transport_state & CMD_T_ABORTED) in target_tmr_work()
3616 cmd->se_tfo->queue_tm_rsp(cmd); in target_tmr_work()
3618 transport_lun_remove_cmd(cmd); in target_tmr_work()
3619 transport_cmd_check_stop_to_fabric(cmd); in target_tmr_work()
3623 target_handle_abort(cmd); in target_tmr_work()
3627 struct se_cmd *cmd) in transport_generic_handle_tmr() argument
3632 spin_lock_irqsave(&cmd->se_dev->se_tmr_lock, flags); in transport_generic_handle_tmr()
3633 list_add_tail(&cmd->se_tmr_req->tmr_list, &cmd->se_dev->dev_tmr_list); in transport_generic_handle_tmr()
3634 spin_unlock_irqrestore(&cmd->se_dev->se_tmr_lock, flags); in transport_generic_handle_tmr()
3636 spin_lock_irqsave(&cmd->t_state_lock, flags); in transport_generic_handle_tmr()
3637 if (cmd->transport_state & CMD_T_ABORTED) { in transport_generic_handle_tmr()
3640 cmd->t_state = TRANSPORT_ISTATE_PROCESSING; in transport_generic_handle_tmr()
3641 cmd->transport_state |= CMD_T_ACTIVE; in transport_generic_handle_tmr()
3643 spin_unlock_irqrestore(&cmd->t_state_lock, flags); in transport_generic_handle_tmr()
3647 cmd->se_tmr_req->function, in transport_generic_handle_tmr()
3648 cmd->se_tmr_req->ref_task_tag, cmd->tag); in transport_generic_handle_tmr()
3649 target_handle_abort(cmd); in transport_generic_handle_tmr()
3653 INIT_WORK(&cmd->work, target_tmr_work); in transport_generic_handle_tmr()
3654 schedule_work(&cmd->work); in transport_generic_handle_tmr()