Lines Matching refs:tfcp_req
286 struct fcloop_fcpreq *tfcp_req; member
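The single "member" hit at 286 is the back-pointer that the initiator-side request keeps to its target-side twin. A minimal sketch of the enclosing structure, inferred from how the fragments below use it; the field set and the inilock name are assumptions, not verified against this exact source revision:

    /* per-I/O state kept in the host/initiator request's private area (sketch) */
    struct fcloop_ini_fcpreq {
        struct nvmefc_fcp_req   *fcpreq;    /* host-side request */
        struct fcloop_fcpreq    *tfcp_req;  /* target-side twin (the line 286 hit) */
        spinlock_t              inilock;    /* assumed: guards the tfcp_req pointer */
    };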
539 struct fcloop_fcpreq *tfcp_req = in fcloop_tfcp_req_free() local
542 kfree(tfcp_req); in fcloop_tfcp_req_free()
546 fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req) in fcloop_tfcp_req_put() argument
548 kref_put(&tfcp_req->ref, fcloop_tfcp_req_free); in fcloop_tfcp_req_put()
552 fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req) in fcloop_tfcp_req_get() argument
554 return kref_get_unless_zero(&tfcp_req->ref); in fcloop_tfcp_req_get()
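Lines 539 to 554 are the reference-counting helpers for the target-side request. A sketch of the trio, assuming the object embeds a struct kref named ref, as the put/get fragments indicate; the release path simply frees the allocation made at line 736:

    static void fcloop_tfcp_req_free(struct kref *ref)
    {
        struct fcloop_fcpreq *tfcp_req =
            container_of(ref, struct fcloop_fcpreq, ref);

        kfree(tfcp_req);    /* last reference dropped: free the request */
    }

    static void fcloop_tfcp_req_put(struct fcloop_fcpreq *tfcp_req)
    {
        kref_put(&tfcp_req->ref, fcloop_tfcp_req_free);
    }

    static int fcloop_tfcp_req_get(struct fcloop_fcpreq *tfcp_req)
    {
        /* fails (returns 0) if the request is already on its way to being freed */
        return kref_get_unless_zero(&tfcp_req->ref);
    }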
559 struct fcloop_fcpreq *tfcp_req, int status) in fcloop_call_host_done() argument
566 inireq->tfcp_req = NULL; in fcloop_call_host_done()
574 fcloop_tfcp_req_put(tfcp_req); in fcloop_call_host_done()
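Lines 559 to 574 finish the host-side request and drop the per-I/O reference. The sketch below assumes the host request's private area is the fcloop_ini_fcpreq shown earlier, that its pointer is guarded by an inilock, and that completion goes through the standard nvmefc_fcp_req done() callback:

    static void fcloop_call_host_done(struct nvmefc_fcp_req *fcpreq,
            struct fcloop_fcpreq *tfcp_req, int status)
    {
        struct fcloop_ini_fcpreq *inireq;

        if (fcpreq) {
            inireq = fcpreq->private;
            spin_lock(&inireq->inilock);
            inireq->tfcp_req = NULL;    /* line 566: break the ini->tgt link */
            spin_unlock(&inireq->inilock);

            fcpreq->status = status;
            fcpreq->done(fcpreq);       /* complete the host-side request */
        }

        /* line 574: drop the reference held for this I/O */
        fcloop_tfcp_req_put(tfcp_req);
    }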
591 static int check_for_drop(struct fcloop_fcpreq *tfcp_req) in check_for_drop() argument
593 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; in check_for_drop()
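check_for_drop() at 591 is fcloop's error-injection hook: it looks at the command about to be delivered and decides whether to drop it instead of passing it to the target. Only the fcpreq access at 593 is visible here, so the body below is a placeholder; the real function parses the command IU and compares it against drop-injection module parameters whose names are not shown in these fragments:

    /* returns nonzero if this command should be dropped (error injection), sketch only */
    static int check_for_drop(struct fcloop_fcpreq *tfcp_req)
    {
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;   /* line 593 */

        /* placeholder: inspect the SQE in fcpreq->cmdaddr and count matches
         * against the configured drop parameters */
        return 0;   /* default: never drop */
    }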
624 struct fcloop_fcpreq *tfcp_req = in fcloop_fcp_recv_work() local
626 struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq; in fcloop_fcp_recv_work()
631 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_recv_work()
632 switch (tfcp_req->inistate) { in fcloop_fcp_recv_work()
634 tfcp_req->inistate = INI_IO_ACTIVE; in fcloop_fcp_recv_work()
640 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_recv_work()
644 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_recv_work()
649 if (likely(!check_for_drop(tfcp_req))) in fcloop_fcp_recv_work()
650 ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport, in fcloop_fcp_recv_work()
651 &tfcp_req->tgt_fcp_req, in fcloop_fcp_recv_work()
657 fcloop_call_host_done(fcpreq, tfcp_req, ret); in fcloop_fcp_recv_work()
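Lines 624 to 657 are the receive work item: advance the request from INI_IO_START to INI_IO_ACTIVE under reqlock, then hand the command to the target transport unless the drop hook fires, and report any failure back to the host. A sketch assuming the INI_IO_* state names and the nvmet_fc_rcv_fcp_req() call shown above; the aborted-race handling is inferred, not quoted:

    static void fcloop_fcp_recv_work(struct work_struct *work)
    {
        struct fcloop_fcpreq *tfcp_req =
            container_of(work, struct fcloop_fcpreq, fcp_rcv_work);
        struct nvmefc_fcp_req *fcpreq = tfcp_req->fcpreq;
        unsigned long flags;
        bool aborted = false;
        int ret = 0;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        switch (tfcp_req->inistate) {
        case INI_IO_START:
            tfcp_req->inistate = INI_IO_ACTIVE;   /* line 634 */
            break;
        case INI_IO_ABORTED:
            aborted = true;                       /* host aborted before we ran */
            break;
        default:
            spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
            WARN_ON(1);
            return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(aborted))
            ret = -ECANCELED;
        else if (likely(!check_for_drop(tfcp_req)))          /* line 649 */
            ret = nvmet_fc_rcv_fcp_req(tfcp_req->tport->targetport,
                    &tfcp_req->tgt_fcp_req,
                    fcpreq->cmdaddr, fcpreq->cmdlen);

        if (ret)
            fcloop_call_host_done(fcpreq, tfcp_req, ret);    /* line 657 */
    }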
663 struct fcloop_fcpreq *tfcp_req = in fcloop_fcp_abort_recv_work() local
669 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_abort_recv_work()
670 fcpreq = tfcp_req->fcpreq; in fcloop_fcp_abort_recv_work()
671 switch (tfcp_req->inistate) { in fcloop_fcp_abort_recv_work()
678 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_abort_recv_work()
682 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_abort_recv_work()
686 fcloop_tfcp_req_put(tfcp_req); in fcloop_fcp_abort_recv_work()
690 if (tfcp_req->tport->targetport) in fcloop_fcp_abort_recv_work()
691 nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport, in fcloop_fcp_abort_recv_work()
692 &tfcp_req->tgt_fcp_req); in fcloop_fcp_abort_recv_work()
694 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_abort_recv_work()
695 tfcp_req->fcpreq = NULL; in fcloop_fcp_abort_recv_work()
696 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_abort_recv_work()
698 fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED); in fcloop_fcp_abort_recv_work()
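Lines 663 to 698 deliver a host abort to the target side. Under reqlock the work item checks whether the I/O already completed (in which case it only drops the extra reference taken at line 967), otherwise it forwards the abort via nvmet_fc_rcv_fcp_abort(), detaches the host request, and completes it with -ECANCELED. A sketch; the completed-race branch is an assumption consistent with the put at 686:

    static void fcloop_fcp_abort_recv_work(struct work_struct *work)
    {
        struct fcloop_fcpreq *tfcp_req =
            container_of(work, struct fcloop_fcpreq, abort_rcv_work);
        struct nvmefc_fcp_req *fcpreq;
        bool completed = false;
        unsigned long flags;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        fcpreq = tfcp_req->fcpreq;                /* line 670 */
        switch (tfcp_req->inistate) {
        case INI_IO_ABORTED:
            break;                                /* go deliver the abort */
        case INI_IO_COMPLETED:
            completed = true;                     /* raced with normal completion */
            break;
        default:
            spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
            WARN_ON(1);
            return;
        }
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        if (unlikely(completed)) {
            /* line 686: drop the reference taken by fcloop_fcp_abort() */
            fcloop_tfcp_req_put(tfcp_req);
            return;
        }

        if (tfcp_req->tport->targetport)          /* line 690 */
            nvmet_fc_rcv_fcp_abort(tfcp_req->tport->targetport,
                           &tfcp_req->tgt_fcp_req);

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->fcpreq = NULL;                  /* line 695: detach the host request */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        /* line 698: complete the host side with -ECANCELED (drops a reference) */
        fcloop_call_host_done(fcpreq, tfcp_req, -ECANCELED);
    }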
709 struct fcloop_fcpreq *tfcp_req = in fcloop_tgt_fcprqst_done_work() local
714 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_tgt_fcprqst_done_work()
715 fcpreq = tfcp_req->fcpreq; in fcloop_tgt_fcprqst_done_work()
716 tfcp_req->inistate = INI_IO_COMPLETED; in fcloop_tgt_fcprqst_done_work()
717 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_tgt_fcprqst_done_work()
719 fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status); in fcloop_tgt_fcprqst_done_work()
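Lines 709 to 719 are the completion work item queued by fcloop_fcp_req_release(): mark the initiator state INI_IO_COMPLETED under the lock and propagate the target's status to the host. Sketch, directly following the fragments:

    static void fcloop_tgt_fcprqst_done_work(struct work_struct *work)
    {
        struct fcloop_fcpreq *tfcp_req =
            container_of(work, struct fcloop_fcpreq, tio_done_work);
        struct nvmefc_fcp_req *fcpreq;
        unsigned long flags;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        fcpreq = tfcp_req->fcpreq;
        tfcp_req->inistate = INI_IO_COMPLETED;    /* line 716 */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        /* line 719: report the target's status back to the host side */
        fcloop_call_host_done(fcpreq, tfcp_req, tfcp_req->status);
    }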
731 struct fcloop_fcpreq *tfcp_req; in fcloop_fcp_req() local
736 tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC); in fcloop_fcp_req()
737 if (!tfcp_req) in fcloop_fcp_req()
741 inireq->tfcp_req = tfcp_req; in fcloop_fcp_req()
744 tfcp_req->fcpreq = fcpreq; in fcloop_fcp_req()
745 tfcp_req->tport = rport->targetport->private; in fcloop_fcp_req()
746 tfcp_req->inistate = INI_IO_START; in fcloop_fcp_req()
747 spin_lock_init(&tfcp_req->reqlock); in fcloop_fcp_req()
748 INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work); in fcloop_fcp_req()
749 INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work); in fcloop_fcp_req()
750 INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work); in fcloop_fcp_req()
751 kref_init(&tfcp_req->ref); in fcloop_fcp_req()
753 queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work); in fcloop_fcp_req()
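Lines 731 to 753 allocate and wire up the target-side request when the host submits an I/O: one kref, three work items, the INI_IO_START state, and finally the receive work queued on nvmet_wq. The sketch below shows only that core; the surrounding function signature, early checks, and the -ENOMEM return are assumptions:

    /* called on the host->target submission path; atomic context is possible */
    tfcp_req = kzalloc(sizeof(*tfcp_req), GFP_ATOMIC);   /* line 736 */
    if (!tfcp_req)
        return -ENOMEM;                                   /* assumed error code */

    inireq->tfcp_req = tfcp_req;                          /* line 741: link ini->tgt */

    tfcp_req->fcpreq = fcpreq;                            /* line 744 */
    tfcp_req->tport = rport->targetport->private;         /* line 745 */
    tfcp_req->inistate = INI_IO_START;                    /* line 746 */
    spin_lock_init(&tfcp_req->reqlock);
    INIT_WORK(&tfcp_req->fcp_rcv_work, fcloop_fcp_recv_work);
    INIT_WORK(&tfcp_req->abort_rcv_work, fcloop_fcp_abort_recv_work);
    INIT_WORK(&tfcp_req->tio_done_work, fcloop_tgt_fcprqst_done_work);
    kref_init(&tfcp_req->ref);                            /* the initial per-I/O reference */

    queue_work(nvmet_wq, &tfcp_req->fcp_rcv_work);        /* line 753: deliver asynchronously */
    return 0;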
816 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); in fcloop_fcp_op() local
823 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_op()
824 fcpreq = tfcp_req->fcpreq; in fcloop_fcp_op()
825 active = tfcp_req->active; in fcloop_fcp_op()
826 aborted = tfcp_req->aborted; in fcloop_fcp_op()
827 tfcp_req->active = true; in fcloop_fcp_op()
828 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_op()
836 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_op()
837 tfcp_req->active = false; in fcloop_fcp_op()
838 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_op()
887 tfcp_req->status = 0; in fcloop_fcp_op()
895 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_op()
896 tfcp_req->active = false; in fcloop_fcp_op()
897 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_op()
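Lines 816 to 897 come from the target data/response op handler. Every tfcp_req touch there follows one pattern: snapshot fcpreq, active and aborted while setting active under reqlock, run the op (the response op clears status at 887), then clear active under the lock again. A condensed sketch of just that pattern; the guard against a doubly-active request and its return value are assumptions, and the actual op dispatch is elided:

    struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
    struct nvmefc_fcp_req *fcpreq;
    unsigned long flags;
    bool active, aborted;

    spin_lock_irqsave(&tfcp_req->reqlock, flags);         /* line 823 */
    fcpreq = tfcp_req->fcpreq;
    active = tfcp_req->active;
    aborted = tfcp_req->aborted;
    tfcp_req->active = true;                              /* line 827: op now in flight */
    spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

    if (WARN_ON(active))
        return -EALREADY;    /* assumed guard: two concurrent ops on one request */

    /* ... perform the READDATA/WRITEDATA/RSP op; on the response op
     * tfcp_req->status is cleared (line 887) ... */

    spin_lock_irqsave(&tfcp_req->reqlock, flags);         /* line 895 */
    tfcp_req->active = false;                             /* op finished */
    spin_unlock_irqrestore(&tfcp_req->reqlock, flags);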
910 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); in fcloop_tgt_fcp_abort() local
918 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_tgt_fcp_abort()
919 tfcp_req->aborted = true; in fcloop_tgt_fcp_abort()
920 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_tgt_fcp_abort()
922 tfcp_req->status = NVME_SC_INTERNAL; in fcloop_tgt_fcp_abort()
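Lines 910 to 922: the target-side abort callback marks the request aborted under reqlock and pre-loads NVME_SC_INTERNAL as the status the eventual completion will report. Sketch, with the callback signature assumed from the nvmet_fc target template:

    static void fcloop_tgt_fcp_abort(struct nvmet_fc_target_port *tgtport,
            struct nvmefc_tgt_fcp_req *tgt_fcpreq)
    {
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);
        unsigned long flags;

        spin_lock_irqsave(&tfcp_req->reqlock, flags);
        tfcp_req->aborted = true;              /* line 919: later ops see the abort */
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

        tfcp_req->status = NVME_SC_INTERNAL;   /* line 922: final host-visible status */

        /* nothing else to do here; the transport calls req_release once the
         * outstanding op, if any, completes */
    }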
935 struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq); in fcloop_fcp_req_release() local
937 queue_work(nvmet_wq, &tfcp_req->tio_done_work); in fcloop_fcp_req_release()
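Lines 935 to 937: when the target transport releases the request, fcloop defers the host-side completion to the tio_done work item. Sketch:

    static void fcloop_fcp_req_release(struct nvmet_fc_target_port *tgtport,
            struct nvmefc_tgt_fcp_req *tgt_fcpreq)
    {
        struct fcloop_fcpreq *tfcp_req = tgt_fcp_req_to_fcpreq(tgt_fcpreq);

        /* line 937: host-side completion runs from fcloop_tgt_fcprqst_done_work() */
        queue_work(nvmet_wq, &tfcp_req->tio_done_work);
    }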
960 struct fcloop_fcpreq *tfcp_req; in fcloop_fcp_abort() local
965 tfcp_req = inireq->tfcp_req; in fcloop_fcp_abort()
966 if (tfcp_req) in fcloop_fcp_abort()
967 fcloop_tfcp_req_get(tfcp_req); in fcloop_fcp_abort()
970 if (!tfcp_req) in fcloop_fcp_abort()
975 spin_lock_irqsave(&tfcp_req->reqlock, flags); in fcloop_fcp_abort()
976 switch (tfcp_req->inistate) { in fcloop_fcp_abort()
979 tfcp_req->inistate = INI_IO_ABORTED; in fcloop_fcp_abort()
985 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_abort()
989 spin_unlock_irqrestore(&tfcp_req->reqlock, flags); in fcloop_fcp_abort()
993 WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work)); in fcloop_fcp_abort()
999 fcloop_tfcp_req_put(tfcp_req); in fcloop_fcp_abort()
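Lines 960 to 999 are the host-side abort entry point: grab a reference on the paired tfcp_req if it still exists, flip it to INI_IO_ABORTED unless it already completed, then either queue the abort work (which inherits the reference) or drop the reference again. The sketch assumes the inilock protection and the INI_IO_* names used earlier; the WARN_ON around queue_work is quoted from line 993:

    struct fcloop_ini_fcpreq *inireq = fcpreq->private;
    struct fcloop_fcpreq *tfcp_req;
    bool abortio = true;
    unsigned long flags;

    spin_lock(&inireq->inilock);
    tfcp_req = inireq->tfcp_req;                 /* line 965 */
    if (tfcp_req)
        fcloop_tfcp_req_get(tfcp_req);           /* line 967: hold it across the abort */
    spin_unlock(&inireq->inilock);

    if (!tfcp_req)                               /* line 970: already torn down */
        return;

    spin_lock_irqsave(&tfcp_req->reqlock, flags);     /* line 975 */
    switch (tfcp_req->inistate) {
    case INI_IO_START:
    case INI_IO_ACTIVE:
        tfcp_req->inistate = INI_IO_ABORTED;     /* line 979 */
        break;
    case INI_IO_COMPLETED:
        abortio = false;                         /* raced with completion */
        break;
    default:
        spin_unlock_irqrestore(&tfcp_req->reqlock, flags);
        WARN_ON(1);
        return;
    }
    spin_unlock_irqrestore(&tfcp_req->reqlock, flags);

    if (abortio)
        /* line 993: the abort work item inherits the reference taken above */
        WARN_ON(!queue_work(nvmet_wq, &tfcp_req->abort_rcv_work));
    else
        fcloop_tfcp_req_put(tfcp_req);           /* line 999: nothing left to abort */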