Lines Matching +full:supports +full:- +full:cqe
1 // SPDX-License-Identifier: GPL-2.0
8 #include <linux/blk-mq.h>
15 #include <linux/nvme-fc-driver.h>
16 #include <linux/nvme-fc.h>
32 struct list_head ls_rcv_list; /* tgtport->ls_rcv_list */
55 struct list_head lsreq_list; /* tgtport->ls_req_list */
60 /* desired maximum for a single sequence - if sg list allows it */
93 struct list_head fcp_list; /* tgtport->fcp_list */
179 return (iodptr - iodptr->tgtport->iod); in nvmet_fc_iodnum()
185 return (fodptr - fodptr->queue->fod); in nvmet_fc_fodnum()
201 #define NVMET_FC_QUEUEID_MASK ((u64)((1 << BYTES_FOR_QID_SHIFT) - 1))
206 return (assoc->association_id | qid); in nvmet_fc_makeconnid()
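The connection ID returned here is just the association ID with the queue id OR'd into its low-order bytes, so the reverse lookups are simple mask operations. A minimal sketch of the inverse helpers, mirroring this file's conventions and the NVMET_FC_QUEUEID_MASK definition above:

static inline u16
nvmet_fc_getqueueid(u64 connectionid)
{
	/* the low BYTES_FOR_QID bytes carry the queue id */
	return (u16)(connectionid & NVMET_FC_QUEUEID_MASK);
}

static inline u64
nvmet_fc_getassociationid(u64 connectionid)
{
	/* everything above the queue-id bytes is the association id */
	return connectionid & ~NVMET_FC_QUEUEID_MASK;
}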
267 /* *********************** FC-NVME DMA Handling **************************** */
332 s->dma_address = 0L; in fc_map_sg()
334 s->dma_length = s->length; in fc_map_sg()
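fc_map_sg() fakes a scatterlist mapping for the case where the LLDD did not supply a DMA-capable device. The single-buffer wrappers used throughout this file follow the same NULL-device convention; a minimal sketch under that assumption:

static inline dma_addr_t
fc_dma_map_single(struct device *dev, void *ptr, size_t size,
		enum dma_data_direction dir)
{
	/* only go through the DMA API when the LLDD supplied a device */
	return dev ? dma_map_single(dev, ptr, size, dir) : (dma_addr_t)0L;
}

static inline int
fc_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dev ? dma_mapping_error(dev, dma_addr) : 0;
}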
356 /* ********************** FC-NVME LS XMT Handling ************************* */
362 struct nvmet_fc_tgtport *tgtport = lsop->tgtport; in __nvmet_fc_finish_ls_req()
363 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_finish_ls_req()
366 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
368 if (!lsop->req_queued) { in __nvmet_fc_finish_ls_req()
369 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
373 list_del(&lsop->lsreq_list); in __nvmet_fc_finish_ls_req()
375 lsop->req_queued = false; in __nvmet_fc_finish_ls_req()
377 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_finish_ls_req()
379 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_finish_ls_req()
380 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_finish_ls_req()
384 queue_work(nvmet_wq, &tgtport->put_work); in __nvmet_fc_finish_ls_req()
392 struct nvmefc_ls_req *lsreq = &lsop->ls_req; in __nvmet_fc_send_ls_req()
396 if (!tgtport->ops->ls_req) in __nvmet_fc_send_ls_req()
397 return -EOPNOTSUPP; in __nvmet_fc_send_ls_req()
400 return -ESHUTDOWN; in __nvmet_fc_send_ls_req()
402 lsreq->done = done; in __nvmet_fc_send_ls_req()
403 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
404 INIT_LIST_HEAD(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
406 lsreq->rqstdma = fc_dma_map_single(tgtport->dev, lsreq->rqstaddr, in __nvmet_fc_send_ls_req()
407 lsreq->rqstlen + lsreq->rsplen, in __nvmet_fc_send_ls_req()
409 if (fc_dma_mapping_error(tgtport->dev, lsreq->rqstdma)) { in __nvmet_fc_send_ls_req()
410 ret = -EFAULT; in __nvmet_fc_send_ls_req()
413 lsreq->rspdma = lsreq->rqstdma + lsreq->rqstlen; in __nvmet_fc_send_ls_req()
415 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
417 list_add_tail(&lsop->lsreq_list, &tgtport->ls_req_list); in __nvmet_fc_send_ls_req()
419 lsop->req_queued = true; in __nvmet_fc_send_ls_req()
421 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
423 ret = tgtport->ops->ls_req(&tgtport->fc_target_port, lsop->hosthandle, in __nvmet_fc_send_ls_req()
431 lsop->ls_error = ret; in __nvmet_fc_send_ls_req()
432 spin_lock_irqsave(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
433 lsop->req_queued = false; in __nvmet_fc_send_ls_req()
434 list_del(&lsop->lsreq_list); in __nvmet_fc_send_ls_req()
435 spin_unlock_irqrestore(&tgtport->lock, flags); in __nvmet_fc_send_ls_req()
436 fc_dma_unmap_single(tgtport->dev, lsreq->rqstdma, in __nvmet_fc_send_ls_req()
437 (lsreq->rqstlen + lsreq->rsplen), in __nvmet_fc_send_ls_req()
463 /* fc-nvme target doesn't care about success or failure of cmd */ in nvmet_fc_disconnect_assoc_done()
469 * This routine sends a FC-NVME LS to disconnect (aka terminate)
470 * the FC-NVME Association. Terminating the association also
471 * terminates the FC-NVME connections (per queue, both admin and io
473 * down, and the related FC-NVME Association ID and Connection IDs
476 * The behavior of the fc-nvme target is such that it's
479 * connectivity with the fc-nvme host, so the target may never get a
482 * continue on with terminating the association. If the fc-nvme host
488 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_xmt_disconnect_assoc()
500 if (!tgtport->ops->ls_req || assoc->hostport->invalid) in nvmet_fc_xmt_disconnect_assoc()
505 tgtport->ops->lsrqst_priv_sz), GFP_KERNEL); in nvmet_fc_xmt_disconnect_assoc()
507 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
509 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_xmt_disconnect_assoc()
515 lsreq = &lsop->ls_req; in nvmet_fc_xmt_disconnect_assoc()
516 if (tgtport->ops->lsrqst_priv_sz) in nvmet_fc_xmt_disconnect_assoc()
517 lsreq->private = (void *)&discon_acc[1]; in nvmet_fc_xmt_disconnect_assoc()
519 lsreq->private = NULL; in nvmet_fc_xmt_disconnect_assoc()
521 lsop->tgtport = tgtport; in nvmet_fc_xmt_disconnect_assoc()
522 lsop->hosthandle = assoc->hostport->hosthandle; in nvmet_fc_xmt_disconnect_assoc()
525 assoc->association_id); in nvmet_fc_xmt_disconnect_assoc()
530 dev_info(tgtport->dev, in nvmet_fc_xmt_disconnect_assoc()
532 tgtport->fc_target_port.port_num, assoc->a_id, ret); in nvmet_fc_xmt_disconnect_assoc()
538 /* *********************** FC-NVME Port Management ************************ */
550 return -ENOMEM; in nvmet_fc_alloc_ls_iodlist()
552 tgtport->iod = iod; in nvmet_fc_alloc_ls_iodlist()
555 INIT_WORK(&iod->work, nvmet_fc_handle_ls_rqst_work); in nvmet_fc_alloc_ls_iodlist()
556 iod->tgtport = tgtport; in nvmet_fc_alloc_ls_iodlist()
557 list_add_tail(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
559 iod->rqstbuf = kzalloc(sizeof(union nvmefc_ls_requests) + in nvmet_fc_alloc_ls_iodlist()
562 if (!iod->rqstbuf) in nvmet_fc_alloc_ls_iodlist()
565 iod->rspbuf = (union nvmefc_ls_responses *)&iod->rqstbuf[1]; in nvmet_fc_alloc_ls_iodlist()
567 iod->rspdma = fc_dma_map_single(tgtport->dev, iod->rspbuf, in nvmet_fc_alloc_ls_iodlist()
568 sizeof(*iod->rspbuf), in nvmet_fc_alloc_ls_iodlist()
570 if (fc_dma_mapping_error(tgtport->dev, iod->rspdma)) in nvmet_fc_alloc_ls_iodlist()
577 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
578 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
579 for (iod--, i--; i >= 0; iod--, i--) { in nvmet_fc_alloc_ls_iodlist()
580 fc_dma_unmap_single(tgtport->dev, iod->rspdma, in nvmet_fc_alloc_ls_iodlist()
581 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_alloc_ls_iodlist()
582 kfree(iod->rqstbuf); in nvmet_fc_alloc_ls_iodlist()
583 list_del(&iod->ls_rcv_list); in nvmet_fc_alloc_ls_iodlist()
588 return -EFAULT; in nvmet_fc_alloc_ls_iodlist()
594 struct nvmet_fc_ls_iod *iod = tgtport->iod; in nvmet_fc_free_ls_iodlist()
598 fc_dma_unmap_single(tgtport->dev, in nvmet_fc_free_ls_iodlist()
599 iod->rspdma, sizeof(*iod->rspbuf), in nvmet_fc_free_ls_iodlist()
601 kfree(iod->rqstbuf); in nvmet_fc_free_ls_iodlist()
602 list_del(&iod->ls_rcv_list); in nvmet_fc_free_ls_iodlist()
604 kfree(tgtport->iod); in nvmet_fc_free_ls_iodlist()
613 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
614 iod = list_first_entry_or_null(&tgtport->ls_rcv_list, in nvmet_fc_alloc_ls_iod()
617 list_move_tail(&iod->ls_rcv_list, &tgtport->ls_busylist); in nvmet_fc_alloc_ls_iod()
618 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_ls_iod()
629 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
630 list_move(&iod->ls_rcv_list, &tgtport->ls_rcv_list); in nvmet_fc_free_ls_iod()
631 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_free_ls_iod()
638 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_prep_fcp_iodlist()
641 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_prep_fcp_iodlist()
642 INIT_WORK(&fod->defer_work, nvmet_fc_fcp_rqst_op_defer_work); in nvmet_fc_prep_fcp_iodlist()
643 fod->tgtport = tgtport; in nvmet_fc_prep_fcp_iodlist()
644 fod->queue = queue; in nvmet_fc_prep_fcp_iodlist()
645 fod->active = false; in nvmet_fc_prep_fcp_iodlist()
646 fod->abort = false; in nvmet_fc_prep_fcp_iodlist()
647 fod->aborted = false; in nvmet_fc_prep_fcp_iodlist()
648 fod->fcpreq = NULL; in nvmet_fc_prep_fcp_iodlist()
649 list_add_tail(&fod->fcp_list, &queue->fod_list); in nvmet_fc_prep_fcp_iodlist()
650 spin_lock_init(&fod->flock); in nvmet_fc_prep_fcp_iodlist()
652 fod->rspdma = fc_dma_map_single(tgtport->dev, &fod->rspiubuf, in nvmet_fc_prep_fcp_iodlist()
653 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_iodlist()
654 if (fc_dma_mapping_error(tgtport->dev, fod->rspdma)) { in nvmet_fc_prep_fcp_iodlist()
655 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
656 for (fod--, i--; i >= 0; fod--, i--) { in nvmet_fc_prep_fcp_iodlist()
657 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_iodlist()
658 sizeof(fod->rspiubuf), in nvmet_fc_prep_fcp_iodlist()
660 fod->rspdma = 0L; in nvmet_fc_prep_fcp_iodlist()
661 list_del(&fod->fcp_list); in nvmet_fc_prep_fcp_iodlist()
673 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_destroy_fcp_iodlist()
676 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_destroy_fcp_iodlist()
677 if (fod->rspdma) in nvmet_fc_destroy_fcp_iodlist()
678 fc_dma_unmap_single(tgtport->dev, fod->rspdma, in nvmet_fc_destroy_fcp_iodlist()
679 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_destroy_fcp_iodlist()
688 lockdep_assert_held(&queue->qlock); in nvmet_fc_alloc_fcp_iod()
690 fod = list_first_entry_or_null(&queue->fod_list, in nvmet_fc_alloc_fcp_iod()
693 list_del(&fod->fcp_list); in nvmet_fc_alloc_fcp_iod()
694 fod->active = true; in nvmet_fc_alloc_fcp_iod()
710 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_queue_fcp_req()
716 fcpreq->hwqid = queue->qid ? in nvmet_fc_queue_fcp_req()
717 ((queue->qid - 1) % tgtport->ops->max_hw_queues) : 0; in nvmet_fc_queue_fcp_req()
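Worked example of the mapping above: with tgtport->ops->max_hw_queues == 4, I/O queues 1, 5 and 9 all land on hwqid 0 ((qid - 1) % 4), queue 2 lands on hwqid 1, and the admin queue (qid 0) always uses hwqid 0.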
729 nvmet_fc_queue_fcp_req(fod->tgtport, fod->queue, fod->fcpreq); in nvmet_fc_fcp_rqst_op_defer_work()
737 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_free_fcp_iod()
738 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_free_fcp_iod()
742 fc_dma_sync_single_for_cpu(tgtport->dev, fod->rspdma, in nvmet_fc_free_fcp_iod()
743 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_free_fcp_iod()
745 fcpreq->nvmet_fc_private = NULL; in nvmet_fc_free_fcp_iod()
747 fod->active = false; in nvmet_fc_free_fcp_iod()
748 fod->abort = false; in nvmet_fc_free_fcp_iod()
749 fod->aborted = false; in nvmet_fc_free_fcp_iod()
750 fod->writedataactive = false; in nvmet_fc_free_fcp_iod()
751 fod->fcpreq = NULL; in nvmet_fc_free_fcp_iod()
753 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
758 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
759 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_free_fcp_iod()
762 list_add_tail(&fod->fcp_list, &fod->queue->fod_list); in nvmet_fc_free_fcp_iod()
763 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
767 /* Re-use the fod for the next pending cmd that was deferred */ in nvmet_fc_free_fcp_iod()
768 list_del(&deferfcp->req_list); in nvmet_fc_free_fcp_iod()
770 fcpreq = deferfcp->fcp_req; in nvmet_fc_free_fcp_iod()
773 list_add_tail(&deferfcp->req_list, &queue->avail_defer_list); in nvmet_fc_free_fcp_iod()
775 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_free_fcp_iod()
778 memcpy(&fod->cmdiubuf, fcpreq->rspaddr, fcpreq->rsplen); in nvmet_fc_free_fcp_iod()
781 fcpreq->rspaddr = NULL; in nvmet_fc_free_fcp_iod()
782 fcpreq->rsplen = 0; in nvmet_fc_free_fcp_iod()
783 fcpreq->nvmet_fc_private = fod; in nvmet_fc_free_fcp_iod()
784 fod->fcpreq = fcpreq; in nvmet_fc_free_fcp_iod()
785 fod->active = true; in nvmet_fc_free_fcp_iod()
788 tgtport->ops->defer_rcv(&tgtport->fc_target_port, fcpreq); in nvmet_fc_free_fcp_iod()
795 queue_work(queue->work_q, &fod->defer_work); in nvmet_fc_free_fcp_iod()
812 queue->work_q = alloc_workqueue("ntfc%d.%d.%d", 0, 0, in nvmet_fc_alloc_target_queue()
813 assoc->tgtport->fc_target_port.port_num, in nvmet_fc_alloc_target_queue()
814 assoc->a_id, qid); in nvmet_fc_alloc_target_queue()
815 if (!queue->work_q) in nvmet_fc_alloc_target_queue()
818 queue->qid = qid; in nvmet_fc_alloc_target_queue()
819 queue->sqsize = sqsize; in nvmet_fc_alloc_target_queue()
820 queue->assoc = assoc; in nvmet_fc_alloc_target_queue()
821 INIT_LIST_HEAD(&queue->fod_list); in nvmet_fc_alloc_target_queue()
822 INIT_LIST_HEAD(&queue->avail_defer_list); in nvmet_fc_alloc_target_queue()
823 INIT_LIST_HEAD(&queue->pending_cmd_list); in nvmet_fc_alloc_target_queue()
824 atomic_set(&queue->connected, 0); in nvmet_fc_alloc_target_queue()
825 atomic_set(&queue->sqtail, 0); in nvmet_fc_alloc_target_queue()
826 atomic_set(&queue->rsn, 1); in nvmet_fc_alloc_target_queue()
827 atomic_set(&queue->zrspcnt, 0); in nvmet_fc_alloc_target_queue()
828 spin_lock_init(&queue->qlock); in nvmet_fc_alloc_target_queue()
829 kref_init(&queue->ref); in nvmet_fc_alloc_target_queue()
831 nvmet_fc_prep_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
833 ret = nvmet_sq_init(&queue->nvme_sq); in nvmet_fc_alloc_target_queue()
837 WARN_ON(assoc->queues[qid]); in nvmet_fc_alloc_target_queue()
838 assoc->queues[qid] = queue; in nvmet_fc_alloc_target_queue()
843 nvmet_fc_destroy_fcp_iodlist(assoc->tgtport, queue); in nvmet_fc_alloc_target_queue()
844 destroy_workqueue(queue->work_q); in nvmet_fc_alloc_target_queue()
857 nvmet_fc_destroy_fcp_iodlist(queue->assoc->tgtport, queue); in nvmet_fc_tgt_queue_free()
859 destroy_workqueue(queue->work_q); in nvmet_fc_tgt_queue_free()
867 kref_put(&queue->ref, nvmet_fc_tgt_queue_free); in nvmet_fc_tgt_q_put()
873 return kref_get_unless_zero(&queue->ref); in nvmet_fc_tgt_q_get()
880 struct nvmet_fc_tgtport *tgtport = queue->assoc->tgtport; in nvmet_fc_delete_target_queue()
881 struct nvmet_fc_fcp_iod *fod = queue->fod; in nvmet_fc_delete_target_queue()
887 disconnect = atomic_xchg(&queue->connected, 0); in nvmet_fc_delete_target_queue()
893 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
895 for (i = 0; i < queue->sqsize; fod++, i++) { in nvmet_fc_delete_target_queue()
896 if (fod->active) { in nvmet_fc_delete_target_queue()
897 spin_lock(&fod->flock); in nvmet_fc_delete_target_queue()
898 fod->abort = true; in nvmet_fc_delete_target_queue()
904 if (fod->writedataactive) { in nvmet_fc_delete_target_queue()
905 fod->aborted = true; in nvmet_fc_delete_target_queue()
906 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
907 tgtport->ops->fcp_abort( in nvmet_fc_delete_target_queue()
908 &tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_delete_target_queue()
910 spin_unlock(&fod->flock); in nvmet_fc_delete_target_queue()
915 list_for_each_entry_safe(deferfcp, tempptr, &queue->avail_defer_list, in nvmet_fc_delete_target_queue()
917 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
922 deferfcp = list_first_entry_or_null(&queue->pending_cmd_list, in nvmet_fc_delete_target_queue()
927 list_del(&deferfcp->req_list); in nvmet_fc_delete_target_queue()
928 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
930 tgtport->ops->defer_rcv(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
931 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
933 tgtport->ops->fcp_abort(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
934 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
936 tgtport->ops->fcp_req_release(&tgtport->fc_target_port, in nvmet_fc_delete_target_queue()
937 deferfcp->fcp_req); in nvmet_fc_delete_target_queue()
944 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
946 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_delete_target_queue()
948 flush_workqueue(queue->work_q); in nvmet_fc_delete_target_queue()
950 nvmet_sq_destroy(&queue->nvme_sq); in nvmet_fc_delete_target_queue()
968 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_queue()
969 if (association_id == assoc->association_id) { in nvmet_fc_find_target_queue()
970 queue = assoc->queues[qid]; in nvmet_fc_find_target_queue()
972 (!atomic_read(&queue->connected) || in nvmet_fc_find_target_queue()
988 struct nvmet_fc_tgtport *tgtport = hostport->tgtport; in nvmet_fc_hostport_free()
991 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_hostport_free()
992 list_del(&hostport->host_list); in nvmet_fc_hostport_free()
993 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_hostport_free()
994 if (tgtport->ops->host_release && hostport->invalid) in nvmet_fc_hostport_free()
995 tgtport->ops->host_release(hostport->hosthandle); in nvmet_fc_hostport_free()
1003 kref_put(&hostport->ref, nvmet_fc_hostport_free); in nvmet_fc_hostport_put()
1009 return kref_get_unless_zero(&hostport->ref); in nvmet_fc_hostport_get()
1016 if (!hostport || !hostport->hosthandle) in nvmet_fc_free_hostport()
1027 lockdep_assert_held(&tgtport->lock); in nvmet_fc_match_hostport()
1029 list_for_each_entry(host, &tgtport->host_list, host_list) { in nvmet_fc_match_hostport()
1030 if (host->hosthandle == hosthandle && !host->invalid) { in nvmet_fc_match_hostport()
1054 return ERR_PTR(-EINVAL); in nvmet_fc_alloc_hostport()
1056 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1058 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1061 /* no new allocation - release reference */ in nvmet_fc_alloc_hostport()
1068 /* no new allocation - release reference */ in nvmet_fc_alloc_hostport()
1070 return ERR_PTR(-ENOMEM); in nvmet_fc_alloc_hostport()
1073 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1080 newhost->tgtport = tgtport; in nvmet_fc_alloc_hostport()
1081 newhost->hosthandle = hosthandle; in nvmet_fc_alloc_hostport()
1082 INIT_LIST_HEAD(&newhost->host_list); in nvmet_fc_alloc_hostport()
1083 kref_init(&newhost->ref); in nvmet_fc_alloc_hostport()
1085 list_add_tail(&newhost->host_list, &tgtport->host_list); in nvmet_fc_alloc_hostport()
1087 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_hostport()
1104 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_assoc_work()
1113 nvmet_fc_tgtport_get(assoc->tgtport); in nvmet_fc_schedule_delete_assoc()
1114 queue_work(nvmet_wq, &assoc->del_work); in nvmet_fc_schedule_delete_assoc()
1124 list_for_each_entry_rcu(a, &tgtport->assoc_list, a_list) { in nvmet_fc_assoc_exists()
1125 if (association_id == a->association_id) { in nvmet_fc_assoc_exists()
1144 if (!tgtport->pe) in nvmet_fc_alloc_target_assoc()
1151 idx = ida_alloc(&tgtport->assoc_cnt, GFP_KERNEL); in nvmet_fc_alloc_target_assoc()
1155 assoc->hostport = nvmet_fc_alloc_hostport(tgtport, hosthandle); in nvmet_fc_alloc_target_assoc()
1156 if (IS_ERR(assoc->hostport)) in nvmet_fc_alloc_target_assoc()
1159 assoc->tgtport = tgtport; in nvmet_fc_alloc_target_assoc()
1160 assoc->a_id = idx; in nvmet_fc_alloc_target_assoc()
1161 INIT_LIST_HEAD(&assoc->a_list); in nvmet_fc_alloc_target_assoc()
1162 kref_init(&assoc->ref); in nvmet_fc_alloc_target_assoc()
1163 INIT_WORK(&assoc->del_work, nvmet_fc_delete_assoc_work); in nvmet_fc_alloc_target_assoc()
1164 atomic_set(&assoc->terminating, 0); in nvmet_fc_alloc_target_assoc()
1168 get_random_bytes(&ran, sizeof(ran) - BYTES_FOR_QID); in nvmet_fc_alloc_target_assoc()
1171 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1173 assoc->association_id = ran; in nvmet_fc_alloc_target_assoc()
1174 list_add_tail_rcu(&assoc->a_list, &tgtport->assoc_list); in nvmet_fc_alloc_target_assoc()
1177 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_alloc_target_assoc()
1183 ida_free(&tgtport->assoc_cnt, idx); in nvmet_fc_alloc_target_assoc()
1194 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_target_assoc_free()
1199 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_target_assoc_free()
1200 if (assoc->queues[i]) in nvmet_fc_target_assoc_free()
1201 nvmet_fc_delete_target_queue(assoc->queues[i]); in nvmet_fc_target_assoc_free()
1207 nvmet_fc_free_hostport(assoc->hostport); in nvmet_fc_target_assoc_free()
1208 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1209 oldls = assoc->rcv_disconn; in nvmet_fc_target_assoc_free()
1210 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_target_assoc_free()
1214 ida_free(&tgtport->assoc_cnt, assoc->a_id); in nvmet_fc_target_assoc_free()
1215 dev_info(tgtport->dev, in nvmet_fc_target_assoc_free()
1217 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_target_assoc_free()
1224 kref_put(&assoc->ref, nvmet_fc_target_assoc_free); in nvmet_fc_tgt_a_put()
1230 return kref_get_unless_zero(&assoc->ref); in nvmet_fc_tgt_a_get()
1236 struct nvmet_fc_tgtport *tgtport = assoc->tgtport; in nvmet_fc_delete_target_assoc()
1240 terminating = atomic_xchg(&assoc->terminating, 1); in nvmet_fc_delete_target_assoc()
1246 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1247 list_del_rcu(&assoc->a_list); in nvmet_fc_delete_target_assoc()
1248 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_delete_target_assoc()
1252 /* ensure all in-flight I/Os have been processed */ in nvmet_fc_delete_target_assoc()
1253 for (i = NVMET_NR_QUEUES; i >= 0; i--) { in nvmet_fc_delete_target_assoc()
1254 if (assoc->queues[i]) in nvmet_fc_delete_target_assoc()
1255 flush_workqueue(assoc->queues[i]->work_q); in nvmet_fc_delete_target_assoc()
1258 dev_info(tgtport->dev, in nvmet_fc_delete_target_assoc()
1260 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_delete_target_assoc()
1271 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_find_target_assoc()
1272 if (association_id == assoc->association_id) { in nvmet_fc_find_target_assoc()
1291 pe->tgtport = tgtport; in nvmet_fc_portentry_bind()
1292 tgtport->pe = pe; in nvmet_fc_portentry_bind()
1294 pe->port = port; in nvmet_fc_portentry_bind()
1295 port->priv = pe; in nvmet_fc_portentry_bind()
1297 pe->node_name = tgtport->fc_target_port.node_name; in nvmet_fc_portentry_bind()
1298 pe->port_name = tgtport->fc_target_port.port_name; in nvmet_fc_portentry_bind()
1299 INIT_LIST_HEAD(&pe->pe_list); in nvmet_fc_portentry_bind()
1301 list_add_tail(&pe->pe_list, &nvmet_fc_portentry_list); in nvmet_fc_portentry_bind()
1310 if (pe->tgtport) in nvmet_fc_portentry_unbind()
1311 pe->tgtport->pe = NULL; in nvmet_fc_portentry_unbind()
1312 list_del(&pe->pe_list); in nvmet_fc_portentry_unbind()
1319 * re-registration can resume operation.
1328 pe = tgtport->pe; in nvmet_fc_portentry_unbind_tgt()
1330 pe->tgtport = NULL; in nvmet_fc_portentry_unbind_tgt()
1331 tgtport->pe = NULL; in nvmet_fc_portentry_unbind_tgt()
1351 if (tgtport->fc_target_port.node_name == pe->node_name && in nvmet_fc_portentry_rebind_tgt()
1352 tgtport->fc_target_port.port_name == pe->port_name) { in nvmet_fc_portentry_rebind_tgt()
1353 WARN_ON(pe->tgtport); in nvmet_fc_portentry_rebind_tgt()
1354 tgtport->pe = pe; in nvmet_fc_portentry_rebind_tgt()
1355 pe->tgtport = tgtport; in nvmet_fc_portentry_rebind_tgt()
1363 * nvmet_fc_register_targetport - transport entry point called by an
1377 * (ex: -ENXIO) upon failure.
1389 if (!template->xmt_ls_rsp || !template->fcp_op || in nvmet_fc_register_targetport()
1390 !template->fcp_abort || in nvmet_fc_register_targetport()
1391 !template->fcp_req_release || !template->targetport_delete || in nvmet_fc_register_targetport()
1392 !template->max_hw_queues || !template->max_sgl_segments || in nvmet_fc_register_targetport()
1393 !template->max_dif_sgl_segments || !template->dma_boundary) { in nvmet_fc_register_targetport()
1394 ret = -EINVAL; in nvmet_fc_register_targetport()
1398 newrec = kzalloc((sizeof(*newrec) + template->target_priv_sz), in nvmet_fc_register_targetport()
1401 ret = -ENOMEM; in nvmet_fc_register_targetport()
1407 ret = -ENOSPC; in nvmet_fc_register_targetport()
1412 ret = -ENODEV; in nvmet_fc_register_targetport()
1416 newrec->fc_target_port.node_name = pinfo->node_name; in nvmet_fc_register_targetport()
1417 newrec->fc_target_port.port_name = pinfo->port_name; in nvmet_fc_register_targetport()
1418 if (template->target_priv_sz) in nvmet_fc_register_targetport()
1419 newrec->fc_target_port.private = &newrec[1]; in nvmet_fc_register_targetport()
1421 newrec->fc_target_port.private = NULL; in nvmet_fc_register_targetport()
1422 newrec->fc_target_port.port_id = pinfo->port_id; in nvmet_fc_register_targetport()
1423 newrec->fc_target_port.port_num = idx; in nvmet_fc_register_targetport()
1424 INIT_LIST_HEAD(&newrec->tgt_list); in nvmet_fc_register_targetport()
1425 newrec->dev = dev; in nvmet_fc_register_targetport()
1426 newrec->ops = template; in nvmet_fc_register_targetport()
1427 spin_lock_init(&newrec->lock); in nvmet_fc_register_targetport()
1428 INIT_LIST_HEAD(&newrec->ls_rcv_list); in nvmet_fc_register_targetport()
1429 INIT_LIST_HEAD(&newrec->ls_req_list); in nvmet_fc_register_targetport()
1430 INIT_LIST_HEAD(&newrec->ls_busylist); in nvmet_fc_register_targetport()
1431 INIT_LIST_HEAD(&newrec->assoc_list); in nvmet_fc_register_targetport()
1432 INIT_LIST_HEAD(&newrec->host_list); in nvmet_fc_register_targetport()
1433 kref_init(&newrec->ref); in nvmet_fc_register_targetport()
1434 ida_init(&newrec->assoc_cnt); in nvmet_fc_register_targetport()
1435 newrec->max_sg_cnt = template->max_sgl_segments; in nvmet_fc_register_targetport()
1436 INIT_WORK(&newrec->put_work, nvmet_fc_put_tgtport_work); in nvmet_fc_register_targetport()
1440 ret = -ENOMEM; in nvmet_fc_register_targetport()
1447 list_add_tail(&newrec->tgt_list, &nvmet_fc_target_list); in nvmet_fc_register_targetport()
1450 *portptr = &newrec->fc_target_port; in nvmet_fc_register_targetport()
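For context, a hedged sketch of how an LLDD might call this registration entry point. The example_* callbacks, WWN values and parent device are placeholders for LLDD-specific pieces and are not part of this file:

static struct nvmet_fc_target_template example_tgt_template = {
	/* example_* callbacks are LLDD-specific stubs, not shown here */
	.targetport_delete	= example_targetport_delete,
	.xmt_ls_rsp		= example_xmt_ls_rsp,
	.fcp_op			= example_fcp_op,
	.fcp_abort		= example_fcp_abort,
	.fcp_req_release	= example_fcp_req_release,
	.max_hw_queues		= 4,
	.max_sgl_segments	= 256,
	.max_dif_sgl_segments	= 256,
	.dma_boundary		= 0xFFFFFFFF,
};

static int
example_register_tgtport(struct device *parent_dev, u64 wwnn, u64 wwpn,
		u32 d_id, struct nvmet_fc_target_port **tgtport)
{
	struct nvmet_fc_port_info pinfo = {
		.node_name	= wwnn,
		.port_name	= wwpn,
		.port_id	= d_id,
	};

	/* fails with -EINVAL if any mandatory template entry is missing */
	return nvmet_fc_register_targetport(&pinfo, &example_tgt_template,
					    parent_dev, tgtport);
}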
1471 struct device *dev = tgtport->dev; in nvmet_fc_free_tgtport()
1475 list_del(&tgtport->tgt_list); in nvmet_fc_free_tgtport()
1481 tgtport->ops->targetport_delete(&tgtport->fc_target_port); in nvmet_fc_free_tgtport()
1484 tgtport->fc_target_port.port_num); in nvmet_fc_free_tgtport()
1486 ida_destroy(&tgtport->assoc_cnt); in nvmet_fc_free_tgtport()
1496 kref_put(&tgtport->ref, nvmet_fc_free_tgtport); in nvmet_fc_tgtport_put()
1502 return kref_get_unless_zero(&tgtport->ref); in nvmet_fc_tgtport_get()
1511 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in __nvmet_fc_free_assocs()
1521 * nvmet_fc_invalidate_host - transport entry point called by an LLDD
1524 * The nvmet-fc layer ensures that any references to the hosthandle
1534 * retries by the nvmet-fc transport. The nvmet-fc transport may
1536 * NVME associations. The nvmet-fc transport will call the
1537 * ops->host_release() callback to notify the LLDD that all references
1558 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1560 &tgtport->assoc_list, a_list) { in nvmet_fc_invalidate_host()
1561 if (assoc->hostport->hosthandle != hosthandle) in nvmet_fc_invalidate_host()
1565 assoc->hostport->invalid = 1; in nvmet_fc_invalidate_host()
1570 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_invalidate_host()
1572 /* if there's nothing to wait for - call the callback */ in nvmet_fc_invalidate_host()
1573 if (noassoc && tgtport->ops->host_release) in nvmet_fc_invalidate_host()
1574 tgtport->ops->host_release(hosthandle); in nvmet_fc_invalidate_host()
1599 list_for_each_entry_rcu(assoc, &tgtport->assoc_list, a_list) { in nvmet_fc_delete_ctrl()
1600 queue = assoc->queues[0]; in nvmet_fc_delete_ctrl()
1601 if (queue && queue->nvme_sq.ctrl == ctrl) { in nvmet_fc_delete_ctrl()
1623 * nvmet_fc_unregister_targetport - transport entry point called by an
1631 * (ex: -ENXIO) upon failure.
1659 /* ********************** FC-NVME LS RCV Handling ************************* */
1666 struct fcnvme_ls_cr_assoc_rqst *rqst = &iod->rqstbuf->rq_cr_assoc; in nvmet_fc_ls_create_association()
1667 struct fcnvme_ls_cr_assoc_acc *acc = &iod->rspbuf->rsp_cr_assoc; in nvmet_fc_ls_create_association()
1674 * FC-NVME spec changes. There are initiators sending different in nvmet_fc_ls_create_association()
1681 if (iod->rqstdatalen < FCNVME_LSDESC_CRA_RQST_MINLEN) in nvmet_fc_ls_create_association()
1683 else if (be32_to_cpu(rqst->desc_list_len) < in nvmet_fc_ls_create_association()
1686 else if (rqst->assoc_cmd.desc_tag != in nvmet_fc_ls_create_association()
1689 else if (be32_to_cpu(rqst->assoc_cmd.desc_len) < in nvmet_fc_ls_create_association()
1692 else if (!rqst->assoc_cmd.ersp_ratio || in nvmet_fc_ls_create_association()
1693 (be16_to_cpu(rqst->assoc_cmd.ersp_ratio) >= in nvmet_fc_ls_create_association()
1694 be16_to_cpu(rqst->assoc_cmd.sqsize))) in nvmet_fc_ls_create_association()
1699 iod->assoc = nvmet_fc_alloc_target_assoc( in nvmet_fc_ls_create_association()
1700 tgtport, iod->hosthandle); in nvmet_fc_ls_create_association()
1701 if (!iod->assoc) in nvmet_fc_ls_create_association()
1704 queue = nvmet_fc_alloc_target_queue(iod->assoc, 0, in nvmet_fc_ls_create_association()
1705 be16_to_cpu(rqst->assoc_cmd.sqsize)); in nvmet_fc_ls_create_association()
1708 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_association()
1714 dev_err(tgtport->dev, in nvmet_fc_ls_create_association()
1717 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_association()
1718 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_association()
1724 queue->ersp_ratio = be16_to_cpu(rqst->assoc_cmd.ersp_ratio); in nvmet_fc_ls_create_association()
1725 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_association()
1726 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_association()
1728 dev_info(tgtport->dev, in nvmet_fc_ls_create_association()
1730 tgtport->fc_target_port.port_num, iod->assoc->a_id); in nvmet_fc_ls_create_association()
1734 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_association()
1740 acc->associd.desc_tag = cpu_to_be32(FCNVME_LSDESC_ASSOC_ID); in nvmet_fc_ls_create_association()
1741 acc->associd.desc_len = in nvmet_fc_ls_create_association()
1744 acc->associd.association_id = in nvmet_fc_ls_create_association()
1745 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, 0)); in nvmet_fc_ls_create_association()
1746 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_association()
1747 acc->connectid.desc_len = in nvmet_fc_ls_create_association()
1750 acc->connectid.connection_id = acc->associd.association_id; in nvmet_fc_ls_create_association()
1757 struct fcnvme_ls_cr_conn_rqst *rqst = &iod->rqstbuf->rq_cr_conn; in nvmet_fc_ls_create_connection()
1758 struct fcnvme_ls_cr_conn_acc *acc = &iod->rspbuf->rsp_cr_conn; in nvmet_fc_ls_create_connection()
1764 if (iod->rqstdatalen < sizeof(struct fcnvme_ls_cr_conn_rqst)) in nvmet_fc_ls_create_connection()
1766 else if (rqst->desc_list_len != in nvmet_fc_ls_create_connection()
1770 else if (rqst->associd.desc_tag != cpu_to_be32(FCNVME_LSDESC_ASSOC_ID)) in nvmet_fc_ls_create_connection()
1772 else if (rqst->associd.desc_len != in nvmet_fc_ls_create_connection()
1776 else if (rqst->connect_cmd.desc_tag != in nvmet_fc_ls_create_connection()
1779 else if (rqst->connect_cmd.desc_len != in nvmet_fc_ls_create_connection()
1783 else if (!rqst->connect_cmd.ersp_ratio || in nvmet_fc_ls_create_connection()
1784 (be16_to_cpu(rqst->connect_cmd.ersp_ratio) >= in nvmet_fc_ls_create_connection()
1785 be16_to_cpu(rqst->connect_cmd.sqsize))) in nvmet_fc_ls_create_connection()
1790 iod->assoc = nvmet_fc_find_target_assoc(tgtport, in nvmet_fc_ls_create_connection()
1791 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_create_connection()
1792 if (!iod->assoc) in nvmet_fc_ls_create_connection()
1795 queue = nvmet_fc_alloc_target_queue(iod->assoc, in nvmet_fc_ls_create_connection()
1796 be16_to_cpu(rqst->connect_cmd.qid), in nvmet_fc_ls_create_connection()
1797 be16_to_cpu(rqst->connect_cmd.sqsize)); in nvmet_fc_ls_create_connection()
1802 nvmet_fc_tgt_a_put(iod->assoc); in nvmet_fc_ls_create_connection()
1807 dev_err(tgtport->dev, in nvmet_fc_ls_create_connection()
1810 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_create_connection()
1811 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_create_connection()
1819 queue->ersp_ratio = be16_to_cpu(rqst->connect_cmd.ersp_ratio); in nvmet_fc_ls_create_connection()
1820 atomic_set(&queue->connected, 1); in nvmet_fc_ls_create_connection()
1821 queue->sqhd = 0; /* best place to init value */ in nvmet_fc_ls_create_connection()
1825 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_create_connection()
1830 acc->connectid.desc_tag = cpu_to_be32(FCNVME_LSDESC_CONN_ID); in nvmet_fc_ls_create_connection()
1831 acc->connectid.desc_len = in nvmet_fc_ls_create_connection()
1834 acc->connectid.connection_id = in nvmet_fc_ls_create_connection()
1835 cpu_to_be64(nvmet_fc_makeconnid(iod->assoc, in nvmet_fc_ls_create_connection()
1836 be16_to_cpu(rqst->connect_cmd.qid))); in nvmet_fc_ls_create_connection()
1848 &iod->rqstbuf->rq_dis_assoc; in nvmet_fc_ls_disconnect()
1850 &iod->rspbuf->rsp_dis_assoc; in nvmet_fc_ls_disconnect()
1858 ret = nvmefc_vldt_lsreq_discon_assoc(iod->rqstdatalen, rqst); in nvmet_fc_ls_disconnect()
1860 /* match an active association - takes an assoc ref if !NULL */ in nvmet_fc_ls_disconnect()
1862 be64_to_cpu(rqst->associd.association_id)); in nvmet_fc_ls_disconnect()
1863 iod->assoc = assoc; in nvmet_fc_ls_disconnect()
1869 dev_err(tgtport->dev, in nvmet_fc_ls_disconnect()
1872 iod->lsrsp->rsplen = nvme_fc_format_rjt(acc, in nvmet_fc_ls_disconnect()
1873 sizeof(*acc), rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1883 iod->lsrsp->rsplen = sizeof(*acc); in nvmet_fc_ls_disconnect()
1899 spin_lock_irqsave(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1900 oldls = assoc->rcv_disconn; in nvmet_fc_ls_disconnect()
1901 assoc->rcv_disconn = iod; in nvmet_fc_ls_disconnect()
1902 spin_unlock_irqrestore(&tgtport->lock, flags); in nvmet_fc_ls_disconnect()
1905 dev_info(tgtport->dev, in nvmet_fc_ls_disconnect()
1908 tgtport->fc_target_port.port_num, assoc->a_id); in nvmet_fc_ls_disconnect()
1910 oldls->lsrsp->rsplen = nvme_fc_format_rjt(oldls->rspbuf, in nvmet_fc_ls_disconnect()
1911 sizeof(*iod->rspbuf), in nvmet_fc_ls_disconnect()
1913 rqst->w0.ls_cmd, in nvmet_fc_ls_disconnect()
1936 struct nvmet_fc_ls_iod *iod = lsrsp->nvme_fc_private; in nvmet_fc_xmt_ls_rsp_done()
1937 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_xmt_ls_rsp_done()
1939 fc_dma_sync_single_for_cpu(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp_done()
1940 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp_done()
1951 fc_dma_sync_single_for_device(tgtport->dev, iod->rspdma, in nvmet_fc_xmt_ls_rsp()
1952 sizeof(*iod->rspbuf), DMA_TO_DEVICE); in nvmet_fc_xmt_ls_rsp()
1954 ret = tgtport->ops->xmt_ls_rsp(&tgtport->fc_target_port, iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1956 nvmet_fc_xmt_ls_rsp_done(iod->lsrsp); in nvmet_fc_xmt_ls_rsp()
1960 * Actual processing routine for received FC-NVME LS Requests from the LLDD
1966 struct fcnvme_ls_rqst_w0 *w0 = &iod->rqstbuf->rq_cr_assoc.w0; in nvmet_fc_handle_ls_rqst()
1969 iod->lsrsp->nvme_fc_private = iod; in nvmet_fc_handle_ls_rqst()
1970 iod->lsrsp->rspbuf = iod->rspbuf; in nvmet_fc_handle_ls_rqst()
1971 iod->lsrsp->rspdma = iod->rspdma; in nvmet_fc_handle_ls_rqst()
1972 iod->lsrsp->done = nvmet_fc_xmt_ls_rsp_done; in nvmet_fc_handle_ls_rqst()
1974 iod->lsrsp->rsplen = 0; in nvmet_fc_handle_ls_rqst()
1976 iod->assoc = NULL; in nvmet_fc_handle_ls_rqst()
1983 switch (w0->ls_cmd) { in nvmet_fc_handle_ls_rqst()
1997 iod->lsrsp->rsplen = nvme_fc_format_rjt(iod->rspbuf, in nvmet_fc_handle_ls_rqst()
1998 sizeof(*iod->rspbuf), w0->ls_cmd, in nvmet_fc_handle_ls_rqst()
2007 * Actual processing routine for received FC-NVME LS Requests from the LLDD
2014 struct nvmet_fc_tgtport *tgtport = iod->tgtport; in nvmet_fc_handle_ls_rqst_work()
2021 * nvmet_fc_rcv_ls_req - transport entry point called by an LLDD
2024 * The nvmet-fc layer will copy payload to an internal structure for
2049 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2051 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2052 nvmefc_ls_names[w0->ls_cmd] : "", in nvmet_fc_rcv_ls_req()
2054 return -E2BIG; in nvmet_fc_rcv_ls_req()
2058 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2060 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2061 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2062 return -ESHUTDOWN; in nvmet_fc_rcv_ls_req()
2067 dev_info(tgtport->dev, in nvmet_fc_rcv_ls_req()
2069 (w0->ls_cmd <= NVME_FC_LAST_LS_CMD_VALUE) ? in nvmet_fc_rcv_ls_req()
2070 nvmefc_ls_names[w0->ls_cmd] : ""); in nvmet_fc_rcv_ls_req()
2072 return -ENOENT; in nvmet_fc_rcv_ls_req()
2075 iod->lsrsp = lsrsp; in nvmet_fc_rcv_ls_req()
2076 iod->fcpreq = NULL; in nvmet_fc_rcv_ls_req()
2077 memcpy(iod->rqstbuf, lsreqbuf, lsreqbuf_len); in nvmet_fc_rcv_ls_req()
2078 iod->rqstdatalen = lsreqbuf_len; in nvmet_fc_rcv_ls_req()
2079 iod->hosthandle = hosthandle; in nvmet_fc_rcv_ls_req()
2081 queue_work(nvmet_wq, &iod->work); in nvmet_fc_rcv_ls_req()
2100 sg = sgl_alloc(fod->req.transfer_len, GFP_KERNEL, &nent); in nvmet_fc_alloc_tgt_pgs()
2104 fod->data_sg = sg; in nvmet_fc_alloc_tgt_pgs()
2105 fod->data_sg_cnt = nent; in nvmet_fc_alloc_tgt_pgs()
2106 fod->data_sg_cnt = fc_dma_map_sg(fod->tgtport->dev, sg, nent, in nvmet_fc_alloc_tgt_pgs()
2107 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_alloc_tgt_pgs()
2110 fod->next_sg = fod->data_sg; in nvmet_fc_alloc_tgt_pgs()
2121 if (!fod->data_sg || !fod->data_sg_cnt) in nvmet_fc_free_tgt_pgs()
2124 fc_dma_unmap_sg(fod->tgtport->dev, fod->data_sg, fod->data_sg_cnt, in nvmet_fc_free_tgt_pgs()
2125 ((fod->io_dir == NVMET_FCP_WRITE) ? in nvmet_fc_free_tgt_pgs()
2127 sgl_free(fod->data_sg); in nvmet_fc_free_tgt_pgs()
2128 fod->data_sg = NULL; in nvmet_fc_free_tgt_pgs()
2129 fod->data_sg_cnt = 0; in nvmet_fc_free_tgt_pgs()
2139 sqtail = atomic_read(&q->sqtail) % q->sqsize; in queue_90percent_full()
2141 used = (sqtail < sqhd) ? (sqtail + q->sqsize - sqhd) : (sqtail - sqhd); in queue_90percent_full()
2142 return ((used * 10) >= (((u32)(q->sqsize - 1) * 9))); in queue_90percent_full()
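Worked example: with sqsize = 32, sqhd = 4 and sqtail = 3, used = 3 + 32 - 4 = 31 and 31 * 10 = 310 >= (32 - 1) * 9 = 279, so the queue counts as at least 90% full and nvmet_fc_prep_fcp_rsp() below will force a full ERSP; with sqhd = 0 and sqtail = 16, used = 16 and 160 < 279, so the zero-length response remains allowed (subject to the other ERSP conditions).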
2153 struct nvme_fc_ersp_iu *ersp = &fod->rspiubuf; in nvmet_fc_prep_fcp_rsp()
2154 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in nvmet_fc_prep_fcp_rsp()
2155 struct nvme_completion *cqe = &ersp->cqe; in nvmet_fc_prep_fcp_rsp() local
2156 u32 *cqewd = (u32 *)cqe; in nvmet_fc_prep_fcp_rsp()
2160 if (fod->fcpreq->op == NVMET_FCOP_READDATA_RSP) in nvmet_fc_prep_fcp_rsp()
2161 xfr_length = fod->req.transfer_len; in nvmet_fc_prep_fcp_rsp()
2163 xfr_length = fod->offset; in nvmet_fc_prep_fcp_rsp()
2167 * Note: to send a 0's response, the NVME-FC host transport will in nvmet_fc_prep_fcp_rsp()
2168 * recreate the CQE. The host transport knows: sq id, SQHD (last in nvmet_fc_prep_fcp_rsp()
2170 * zero-filled CQE with those known fields filled in. Transport in nvmet_fc_prep_fcp_rsp()
2171 * must send an ersp for any condition where the cqe won't match in nvmet_fc_prep_fcp_rsp()
2174 * Here are the FC-NVME mandated cases where we must send an ersp: in nvmet_fc_prep_fcp_rsp()
2176 * force fabric commands to send ersp's (not in FC-NVME but good in nvmet_fc_prep_fcp_rsp()
2178 * normal cmds: any time status is non-zero, or status is zero in nvmet_fc_prep_fcp_rsp()
2179 * but words 0 or 1 are non-zero. in nvmet_fc_prep_fcp_rsp()
2184 rspcnt = atomic_inc_return(&fod->queue->zrspcnt); in nvmet_fc_prep_fcp_rsp()
2185 if (!(rspcnt % fod->queue->ersp_ratio) || in nvmet_fc_prep_fcp_rsp()
2187 xfr_length != fod->req.transfer_len || in nvmet_fc_prep_fcp_rsp()
2188 (le16_to_cpu(cqe->status) & 0xFFFE) || cqewd[0] || cqewd[1] || in nvmet_fc_prep_fcp_rsp()
2189 (sqe->flags & (NVME_CMD_FUSE_FIRST | NVME_CMD_FUSE_SECOND)) || in nvmet_fc_prep_fcp_rsp()
2190 queue_90percent_full(fod->queue, le16_to_cpu(cqe->sq_head))) in nvmet_fc_prep_fcp_rsp()
2193 /* re-set the fields */ in nvmet_fc_prep_fcp_rsp()
2194 fod->fcpreq->rspaddr = ersp; in nvmet_fc_prep_fcp_rsp()
2195 fod->fcpreq->rspdma = fod->rspdma; in nvmet_fc_prep_fcp_rsp()
2199 fod->fcpreq->rsplen = NVME_FC_SIZEOF_ZEROS_RSP; in nvmet_fc_prep_fcp_rsp()
2201 ersp->iu_len = cpu_to_be16(sizeof(*ersp)/sizeof(u32)); in nvmet_fc_prep_fcp_rsp()
2202 rsn = atomic_inc_return(&fod->queue->rsn); in nvmet_fc_prep_fcp_rsp()
2203 ersp->rsn = cpu_to_be32(rsn); in nvmet_fc_prep_fcp_rsp()
2204 ersp->xfrd_len = cpu_to_be32(xfr_length); in nvmet_fc_prep_fcp_rsp()
2205 fod->fcpreq->rsplen = sizeof(*ersp); in nvmet_fc_prep_fcp_rsp()
2208 fc_dma_sync_single_for_device(tgtport->dev, fod->rspdma, in nvmet_fc_prep_fcp_rsp()
2209 sizeof(fod->rspiubuf), DMA_TO_DEVICE); in nvmet_fc_prep_fcp_rsp()
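In short, a full ERSP is sent when the response falls on the ersp_ratio boundary, when the transferred length differs from the requested transfer length, when the CQE carries a non-zero status or non-zero result words, for fabrics and fused commands, or when the queue is at least 90% full; otherwise the transport sends the short zero-length response and the host transport reconstructs the CQE from the fields it already knows.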
2218 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_abort_op()
2227 /* no need to take lock - lock was taken earlier to get here */ in nvmet_fc_abort_op()
2228 if (!fod->aborted) in nvmet_fc_abort_op()
2229 tgtport->ops->fcp_abort(&tgtport->fc_target_port, fcpreq); in nvmet_fc_abort_op()
2231 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_abort_op()
2240 fod->fcpreq->op = NVMET_FCOP_RSP; in nvmet_fc_xmt_fcp_rsp()
2241 fod->fcpreq->timeout = 0; in nvmet_fc_xmt_fcp_rsp()
2245 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_xmt_fcp_rsp()
2254 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_transfer_fcp_data()
2255 struct scatterlist *sg = fod->next_sg; in nvmet_fc_transfer_fcp_data()
2257 u32 remaininglen = fod->req.transfer_len - fod->offset; in nvmet_fc_transfer_fcp_data()
2261 fcpreq->op = op; in nvmet_fc_transfer_fcp_data()
2262 fcpreq->offset = fod->offset; in nvmet_fc_transfer_fcp_data()
2263 fcpreq->timeout = NVME_FC_TGTOP_TIMEOUT_SEC; in nvmet_fc_transfer_fcp_data()
2274 fcpreq->sg = sg; in nvmet_fc_transfer_fcp_data()
2275 fcpreq->sg_cnt = 0; in nvmet_fc_transfer_fcp_data()
2277 fcpreq->sg_cnt < tgtport->max_sg_cnt && in nvmet_fc_transfer_fcp_data()
2279 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2283 if (tlen < remaininglen && fcpreq->sg_cnt == 0) { in nvmet_fc_transfer_fcp_data()
2284 fcpreq->sg_cnt++; in nvmet_fc_transfer_fcp_data()
2289 fod->next_sg = sg; in nvmet_fc_transfer_fcp_data()
2291 fod->next_sg = NULL; in nvmet_fc_transfer_fcp_data()
2293 fcpreq->transfer_length = tlen; in nvmet_fc_transfer_fcp_data()
2294 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2295 fcpreq->fcp_error = 0; in nvmet_fc_transfer_fcp_data()
2296 fcpreq->rsplen = 0; in nvmet_fc_transfer_fcp_data()
2299 * If the last READDATA request: check if LLDD supports in nvmet_fc_transfer_fcp_data()
2303 ((fod->offset + fcpreq->transfer_length) == fod->req.transfer_len) && in nvmet_fc_transfer_fcp_data()
2304 (tgtport->ops->target_features & NVMET_FCTGTFEAT_READDATA_RSP)) { in nvmet_fc_transfer_fcp_data()
2305 fcpreq->op = NVMET_FCOP_READDATA_RSP; in nvmet_fc_transfer_fcp_data()
2309 ret = tgtport->ops->fcp_op(&tgtport->fc_target_port, fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2316 fod->abort = true; in nvmet_fc_transfer_fcp_data()
2319 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2320 fod->writedataactive = false; in nvmet_fc_transfer_fcp_data()
2321 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_transfer_fcp_data()
2322 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_transfer_fcp_data()
2324 fcpreq->fcp_error = ret; in nvmet_fc_transfer_fcp_data()
2325 fcpreq->transferred_length = 0; in nvmet_fc_transfer_fcp_data()
2326 nvmet_fc_xmt_fcp_op_done(fod->fcpreq); in nvmet_fc_transfer_fcp_data()
2334 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in __nvmet_fc_fod_op_abort()
2335 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in __nvmet_fc_fod_op_abort()
2339 if (fcpreq->op == NVMET_FCOP_WRITEDATA) { in __nvmet_fc_fod_op_abort()
2340 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in __nvmet_fc_fod_op_abort()
2357 struct nvmefc_tgt_fcp_req *fcpreq = fod->fcpreq; in nvmet_fc_fod_op_done()
2358 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fod_op_done()
2362 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2363 abort = fod->abort; in nvmet_fc_fod_op_done()
2364 fod->writedataactive = false; in nvmet_fc_fod_op_done()
2365 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2367 switch (fcpreq->op) { in nvmet_fc_fod_op_done()
2372 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2373 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2374 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2375 fod->abort = true; in nvmet_fc_fod_op_done()
2376 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2378 nvmet_req_complete(&fod->req, NVME_SC_INTERNAL); in nvmet_fc_fod_op_done()
2382 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2383 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2384 spin_lock_irqsave(&fod->flock, flags); in nvmet_fc_fod_op_done()
2385 fod->writedataactive = true; in nvmet_fc_fod_op_done()
2386 spin_unlock_irqrestore(&fod->flock, flags); in nvmet_fc_fod_op_done()
2395 fod->req.execute(&fod->req); in nvmet_fc_fod_op_done()
2402 if (fcpreq->fcp_error || in nvmet_fc_fod_op_done()
2403 fcpreq->transferred_length != fcpreq->transfer_length) { in nvmet_fc_fod_op_done()
2410 if (fcpreq->op == NVMET_FCOP_READDATA_RSP) { in nvmet_fc_fod_op_done()
2413 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2417 fod->offset += fcpreq->transferred_length; in nvmet_fc_fod_op_done()
2418 if (fod->offset != fod->req.transfer_len) { in nvmet_fc_fod_op_done()
2437 nvmet_fc_free_fcp_iod(fod->queue, fod); in nvmet_fc_fod_op_done()
2448 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_xmt_fcp_op_done()
2460 struct nvme_common_command *sqe = &fod->cmdiubuf.sqe.common; in __nvmet_fc_fcp_nvme_cmd_done()
2461 struct nvme_completion *cqe = &fod->rspiubuf.cqe; in __nvmet_fc_fcp_nvme_cmd_done() local
2465 spin_lock_irqsave(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2466 abort = fod->abort; in __nvmet_fc_fcp_nvme_cmd_done()
2467 spin_unlock_irqrestore(&fod->flock, flags); in __nvmet_fc_fcp_nvme_cmd_done()
2469 /* if we have a CQE, snoop the last sq_head value */ in __nvmet_fc_fcp_nvme_cmd_done()
2471 fod->queue->sqhd = cqe->sq_head; in __nvmet_fc_fcp_nvme_cmd_done()
2480 /* fudge up a failed CQE status for our transport error */ in __nvmet_fc_fcp_nvme_cmd_done()
2481 memset(cqe, 0, sizeof(*cqe)); in __nvmet_fc_fcp_nvme_cmd_done()
2482 cqe->sq_head = fod->queue->sqhd; /* echo last cqe sqhd */ in __nvmet_fc_fcp_nvme_cmd_done()
2483 cqe->sq_id = cpu_to_le16(fod->queue->qid); in __nvmet_fc_fcp_nvme_cmd_done()
2484 cqe->command_id = sqe->command_id; in __nvmet_fc_fcp_nvme_cmd_done()
2485 cqe->status = cpu_to_le16(status); in __nvmet_fc_fcp_nvme_cmd_done()
2489 * try to push the data even if the SQE status is non-zero. in __nvmet_fc_fcp_nvme_cmd_done()
2493 if ((fod->io_dir == NVMET_FCP_READ) && (fod->data_sg_cnt)) { in __nvmet_fc_fcp_nvme_cmd_done()
2500 /* writes & no data - fall thru */ in __nvmet_fc_fcp_nvme_cmd_done()
2514 struct nvmet_fc_tgtport *tgtport = fod->tgtport; in nvmet_fc_fcp_nvme_cmd_done()
2521 * Actual processing routine for received FC-NVME I/O Requests from the LLDD
2527 struct nvme_fc_cmd_iu *cmdiu = &fod->cmdiubuf; in nvmet_fc_handle_fcp_rqst()
2528 u32 xfrlen = be32_to_cpu(cmdiu->data_len); in nvmet_fc_handle_fcp_rqst()
2540 fod->fcpreq->done = nvmet_fc_xmt_fcp_op_done; in nvmet_fc_handle_fcp_rqst()
2542 if (cmdiu->flags & FCNVME_CMD_FLAGS_WRITE) { in nvmet_fc_handle_fcp_rqst()
2543 fod->io_dir = NVMET_FCP_WRITE; in nvmet_fc_handle_fcp_rqst()
2544 if (!nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2546 } else if (cmdiu->flags & FCNVME_CMD_FLAGS_READ) { in nvmet_fc_handle_fcp_rqst()
2547 fod->io_dir = NVMET_FCP_READ; in nvmet_fc_handle_fcp_rqst()
2548 if (nvme_is_write(&cmdiu->sqe)) in nvmet_fc_handle_fcp_rqst()
2551 fod->io_dir = NVMET_FCP_NODATA; in nvmet_fc_handle_fcp_rqst()
2556 fod->req.cmd = &fod->cmdiubuf.sqe; in nvmet_fc_handle_fcp_rqst()
2557 fod->req.cqe = &fod->rspiubuf.cqe; in nvmet_fc_handle_fcp_rqst()
2558 if (!tgtport->pe) in nvmet_fc_handle_fcp_rqst()
2560 fod->req.port = tgtport->pe->port; in nvmet_fc_handle_fcp_rqst()
2563 memset(&fod->rspiubuf, 0, sizeof(fod->rspiubuf)); in nvmet_fc_handle_fcp_rqst()
2565 fod->data_sg = NULL; in nvmet_fc_handle_fcp_rqst()
2566 fod->data_sg_cnt = 0; in nvmet_fc_handle_fcp_rqst()
2568 ret = nvmet_req_init(&fod->req, in nvmet_fc_handle_fcp_rqst()
2569 &fod->queue->nvme_cq, in nvmet_fc_handle_fcp_rqst()
2570 &fod->queue->nvme_sq, in nvmet_fc_handle_fcp_rqst()
2578 fod->req.transfer_len = xfrlen; in nvmet_fc_handle_fcp_rqst()
2581 atomic_inc(&fod->queue->sqtail); in nvmet_fc_handle_fcp_rqst()
2583 if (fod->req.transfer_len) { in nvmet_fc_handle_fcp_rqst()
2586 nvmet_req_complete(&fod->req, ret); in nvmet_fc_handle_fcp_rqst()
2590 fod->req.sg = fod->data_sg; in nvmet_fc_handle_fcp_rqst()
2591 fod->req.sg_cnt = fod->data_sg_cnt; in nvmet_fc_handle_fcp_rqst()
2592 fod->offset = 0; in nvmet_fc_handle_fcp_rqst()
2594 if (fod->io_dir == NVMET_FCP_WRITE) { in nvmet_fc_handle_fcp_rqst()
2606 fod->req.execute(&fod->req); in nvmet_fc_handle_fcp_rqst()
2614 * nvmet_fc_rcv_fcp_req - transport entry point called by an LLDD
2617 * Pass a FC-NVME FCP CMD IU received from the FC link to the nvmet-fc
2630 * asynchronously received - it's possible for a command to be received
2637 * routine returns a -EOVERFLOW status. Subsequently, when a queue job
2643 * The LLDD, when receiving an -EOVERFLOW completion status, is to treat
2649 * transport will return a non-zero status indicating the error.
2650 * In all cases other than -EOVERFLOW, the transport has not accepted the
2674 (cmdiu->format_id != NVME_CMD_FORMAT_ID) || in nvmet_fc_rcv_fcp_req()
2675 (cmdiu->fc_id != NVME_CMD_FC_ID) || in nvmet_fc_rcv_fcp_req()
2676 (be16_to_cpu(cmdiu->iu_len) != (sizeof(*cmdiu)/4))) in nvmet_fc_rcv_fcp_req()
2677 return -EIO; in nvmet_fc_rcv_fcp_req()
2680 be64_to_cpu(cmdiu->connection_id)); in nvmet_fc_rcv_fcp_req()
2682 return -ENOTCONN; in nvmet_fc_rcv_fcp_req()
2691 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2695 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2697 fcpreq->nvmet_fc_private = fod; in nvmet_fc_rcv_fcp_req()
2698 fod->fcpreq = fcpreq; in nvmet_fc_rcv_fcp_req()
2700 memcpy(&fod->cmdiubuf, cmdiubuf, cmdiubuf_len); in nvmet_fc_rcv_fcp_req()
2707 if (!tgtport->ops->defer_rcv) { in nvmet_fc_rcv_fcp_req()
2708 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2711 return -ENOENT; in nvmet_fc_rcv_fcp_req()
2714 deferfcp = list_first_entry_or_null(&queue->avail_defer_list, in nvmet_fc_rcv_fcp_req()
2717 /* Just re-use one that was previously allocated */ in nvmet_fc_rcv_fcp_req()
2718 list_del(&deferfcp->req_list); in nvmet_fc_rcv_fcp_req()
2720 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2727 return -ENOMEM; in nvmet_fc_rcv_fcp_req()
2729 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2733 fcpreq->rspaddr = cmdiubuf; in nvmet_fc_rcv_fcp_req()
2734 fcpreq->rsplen = cmdiubuf_len; in nvmet_fc_rcv_fcp_req()
2735 deferfcp->fcp_req = fcpreq; in nvmet_fc_rcv_fcp_req()
2738 list_add_tail(&deferfcp->req_list, &queue->pending_cmd_list); in nvmet_fc_rcv_fcp_req()
2742 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_req()
2744 return -EOVERFLOW; in nvmet_fc_rcv_fcp_req()
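For example, an LLDD that gets -EOVERFLOW back from this routine must leave the CMD IU buffer intact: the buffer was parked in fcpreq->rspaddr above and is copied into a reused fod later (see the memcpy in nvmet_fc_free_fcp_iod()), and the LLDD is only released from that obligation once ops->defer_rcv() is called for the request. Any other non-zero return means the command was not accepted.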
2749 * nvmet_fc_rcv_fcp_abort - transport entry point called by an LLDD
2755 * (template_ops->fcp_req_release() has not been called).
2775 struct nvmet_fc_fcp_iod *fod = fcpreq->nvmet_fc_private; in nvmet_fc_rcv_fcp_abort()
2779 if (!fod || fod->fcpreq != fcpreq) in nvmet_fc_rcv_fcp_abort()
2783 queue = fod->queue; in nvmet_fc_rcv_fcp_abort()
2785 spin_lock_irqsave(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
2786 if (fod->active) { in nvmet_fc_rcv_fcp_abort()
2792 spin_lock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2793 fod->abort = true; in nvmet_fc_rcv_fcp_abort()
2794 fod->aborted = true; in nvmet_fc_rcv_fcp_abort()
2795 spin_unlock(&fod->flock); in nvmet_fc_rcv_fcp_abort()
2797 spin_unlock_irqrestore(&queue->qlock, flags); in nvmet_fc_rcv_fcp_abort()
2813 return -EINVAL; in __nvme_fc_parse_u64()
2828 substring_t wwn = { name, &name[sizeof(name)-1] }; in nvme_fc_parse_traddr()
2833 !strncmp(buf, "nn-0x", NVME_FC_TRADDR_OXNNLEN) && in nvme_fc_parse_traddr()
2835 "pn-0x", NVME_FC_TRADDR_OXNNLEN)) { in nvme_fc_parse_traddr()
2840 !strncmp(buf, "nn-", NVME_FC_TRADDR_NNLEN) && in nvme_fc_parse_traddr()
2842 "pn-", NVME_FC_TRADDR_NNLEN))) { in nvme_fc_parse_traddr()
2853 if (__nvme_fc_parse_u64(&wwn, &traddr->nn)) in nvme_fc_parse_traddr()
2857 if (__nvme_fc_parse_u64(&wwn, &traddr->pn)) in nvme_fc_parse_traddr()
2864 return -EINVAL; in nvme_fc_parse_traddr()
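A hedged usage sketch of the parser above (callable only within this file, since it is static), assuming the struct nvmet_fc_traddr { nn, pn } layout implied by the accessors; the WWN values are made up and use the long "nn-0x...:pn-0x..." form:

static int
example_parse_traddr(void)
{
	struct nvmet_fc_traddr traddr = { };
	char buf[] = "nn-0x20000090fa942779:pn-0x10000090fa942779";
	int ret;

	ret = nvme_fc_parse_traddr(&traddr, buf, strlen(buf));
	if (ret)
		return ret;	/* malformed address: -EINVAL */

	pr_info("nn=0x%llx pn=0x%llx\n", traddr.nn, traddr.pn);
	return 0;
}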
2877 if ((port->disc_addr.trtype != NVMF_TRTYPE_FC) || in nvmet_fc_add_port()
2878 (port->disc_addr.adrfam != NVMF_ADDR_FAMILY_FC)) in nvmet_fc_add_port()
2879 return -EINVAL; in nvmet_fc_add_port()
2883 ret = nvme_fc_parse_traddr(&traddr, port->disc_addr.traddr, in nvmet_fc_add_port()
2884 sizeof(port->disc_addr.traddr)); in nvmet_fc_add_port()
2890 return -ENOMEM; in nvmet_fc_add_port()
2892 ret = -ENXIO; in nvmet_fc_add_port()
2895 if ((tgtport->fc_target_port.node_name == traddr.nn) && in nvmet_fc_add_port()
2896 (tgtport->fc_target_port.port_name == traddr.pn)) { in nvmet_fc_add_port()
2898 if (!tgtport->pe) { in nvmet_fc_add_port()
2902 ret = -EALREADY; in nvmet_fc_add_port()
2917 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_remove_port()
2922 __nvmet_fc_free_assocs(pe->tgtport); in nvmet_fc_remove_port()
2930 struct nvmet_fc_port_entry *pe = port->priv; in nvmet_fc_discovery_chg()
2931 struct nvmet_fc_tgtport *tgtport = pe->tgtport; in nvmet_fc_discovery_chg()
2933 if (tgtport && tgtport->ops->discovery_event) in nvmet_fc_discovery_chg()
2934 tgtport->ops->discovery_event(&tgtport->fc_target_port); in nvmet_fc_discovery_chg()
2941 struct nvmet_sq *sq = ctrl->sqs[0]; in nvmet_fc_host_traddr()
2944 struct nvmet_fc_tgtport *tgtport = queue->assoc ? queue->assoc->tgtport : NULL; in nvmet_fc_host_traddr()
2945 struct nvmet_fc_hostport *hostport = queue->assoc ? queue->assoc->hostport : NULL; in nvmet_fc_host_traddr()
2950 return -ENODEV; in nvmet_fc_host_traddr()
2952 ret = -ENODEV; in nvmet_fc_host_traddr()
2956 if (tgtport->ops->host_traddr) { in nvmet_fc_host_traddr()
2957 ret = tgtport->ops->host_traddr(hostport->hosthandle, &wwnn, &wwpn); in nvmet_fc_host_traddr()
2960 ret = snprintf(traddr, traddr_size, "nn-0x%llx:pn-0x%llx", wwnn, wwpn); in nvmet_fc_host_traddr()
2991 /* sanity check - all targetports should be removed */ in nvmet_fc_exit_module()