Lines of drivers/infiniband/ulp/srp/ib_srp.c matching "reserved", "ipi", and "vectors"
2 * Copyright (c) 2005 Cisco Systems. All rights reserved.
14 * - Redistributions of source code must retain the above
18 * - Redistributions in binary form must reproduce the above
137 "Number of RDMA channels to use for communication with an SRP target. Using more than one channel improves performance if the HCA supports multiple completion vectors. The default value is the minimum of four times the number of online CPU sockets and the number of completion vectors supported by the HCA.");
164 int tmo = *(int *)kp->arg; in srp_tmo_get()
180 if (kp->arg == &srp_reconnect_delay) in srp_tmo_set()
183 else if (kp->arg == &srp_fast_io_fail_tmo) in srp_tmo_set()
190 *(int *)kp->arg = tmo; in srp_tmo_set()
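The two handlers above are the get and set halves of a validated module parameter: srp_tmo_set() cross-checks the new value against the other timeouts before storing it through kp->arg. A minimal sketch of how such a pair is typically wired up via kernel_param_ops (the ops-struct name, default value, and permissions here are illustrative, not taken from the listing):

	/* Sketch: route a writable module parameter through validation. */
	static const struct kernel_param_ops srp_tmo_ops = {
		.get = srp_tmo_get,	/* formats the current value */
		.set = srp_tmo_set,	/* validates before storing */
	};

	static int srp_reconnect_delay = 10;	/* assumed default */
	module_param_cb(reconnect_delay, &srp_tmo_ops, &srp_reconnect_delay, 0644);
	MODULE_PARM_DESC(reconnect_delay, "Time between successive reconnect attempts");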
203 return (struct srp_target_port *) host->hostdata; in host_to_target()
208 return host_to_target(host)->target_name; in srp_target_info()
217 (!memcmp(&target->ioc_guid, topspin_oui, sizeof topspin_oui) || in srp_target_is_topspin()
218 !memcmp(&target->ioc_guid, cisco_oui, sizeof cisco_oui)); in srp_target_is_topspin()
231 iu->buf = kzalloc(size, gfp_mask); in srp_alloc_iu()
232 if (!iu->buf) in srp_alloc_iu()
235 iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size, in srp_alloc_iu()
237 if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma)) in srp_alloc_iu()
240 iu->size = size; in srp_alloc_iu()
241 iu->direction = direction; in srp_alloc_iu()
246 kfree(iu->buf); in srp_alloc_iu()
258 ib_dma_unmap_single(host->srp_dev->dev, iu->dma, iu->size, in srp_free_iu()
259 iu->direction); in srp_free_iu()
260 kfree(iu->buf); in srp_free_iu()
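Read together, the matched lines above give the whole life cycle of an information unit: allocate the container and its buffer, DMA-map the buffer, record size and direction, and unwind in the reverse order on failure or free. A sketch of the allocation path reconstructed from those lines (the kmalloc of the container and the error labels are assumptions, since the grep omits the surrounding lines):

	static struct srp_iu *srp_alloc_iu(struct srp_host *host, size_t size,
					   gfp_t gfp_mask,
					   enum dma_data_direction direction)
	{
		struct srp_iu *iu;

		iu = kmalloc(sizeof(*iu), gfp_mask);
		if (!iu)
			goto out;

		iu->buf = kzalloc(size, gfp_mask);
		if (!iu->buf)
			goto out_free_iu;

		iu->dma = ib_dma_map_single(host->srp_dev->dev, iu->buf, size,
					    direction);
		if (ib_dma_mapping_error(host->srp_dev->dev, iu->dma))
			goto out_free_buf;

		iu->size = size;
		iu->direction = direction;
		return iu;

	out_free_buf:
		kfree(iu->buf);
	out_free_iu:
		kfree(iu);
	out:
		return NULL;
	}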
267 ib_event_msg(event->event), event->event); in srp_qp_event()
278 return -ENOMEM; in srp_init_ib_qp()
280 ret = ib_find_cached_pkey(target->srp_host->srp_dev->dev, in srp_init_ib_qp()
281 target->srp_host->port, in srp_init_ib_qp()
282 be16_to_cpu(target->ib_cm.pkey), in srp_init_ib_qp()
283 &attr->pkey_index); in srp_init_ib_qp()
287 attr->qp_state = IB_QPS_INIT; in srp_init_ib_qp()
288 attr->qp_access_flags = (IB_ACCESS_REMOTE_READ | in srp_init_ib_qp()
290 attr->port_num = target->srp_host->port; in srp_init_ib_qp()
305 struct srp_target_port *target = ch->target; in srp_new_ib_cm_id()
308 new_cm_id = ib_create_cm_id(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
313 if (ch->ib_cm.cm_id) in srp_new_ib_cm_id()
314 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_new_ib_cm_id()
315 ch->ib_cm.cm_id = new_cm_id; in srp_new_ib_cm_id()
316 if (rdma_cap_opa_ah(target->srp_host->srp_dev->dev, in srp_new_ib_cm_id()
317 target->srp_host->port)) in srp_new_ib_cm_id()
318 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_OPA; in srp_new_ib_cm_id()
320 ch->ib_cm.path.rec_type = SA_PATH_REC_TYPE_IB; in srp_new_ib_cm_id()
321 ch->ib_cm.path.sgid = target->sgid; in srp_new_ib_cm_id()
322 ch->ib_cm.path.dgid = target->ib_cm.orig_dgid; in srp_new_ib_cm_id()
323 ch->ib_cm.path.pkey = target->ib_cm.pkey; in srp_new_ib_cm_id()
324 ch->ib_cm.path.service_id = target->ib_cm.service_id; in srp_new_ib_cm_id()
331 struct srp_target_port *target = ch->target; in srp_new_rdma_cm_id()
335 new_cm_id = rdma_create_id(target->net, srp_rdma_cm_handler, ch, in srp_new_rdma_cm_id()
343 init_completion(&ch->done); in srp_new_rdma_cm_id()
344 ret = rdma_resolve_addr(new_cm_id, target->rdma_cm.src_specified ? in srp_new_rdma_cm_id()
345 &target->rdma_cm.src.sa : NULL, in srp_new_rdma_cm_id()
346 &target->rdma_cm.dst.sa, in srp_new_rdma_cm_id()
350 &target->rdma_cm.src, &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
353 ret = wait_for_completion_interruptible(&ch->done); in srp_new_rdma_cm_id()
357 ret = ch->status; in srp_new_rdma_cm_id()
360 &target->rdma_cm.dst, ret); in srp_new_rdma_cm_id()
364 swap(ch->rdma_cm.cm_id, new_cm_id); in srp_new_rdma_cm_id()
375 struct srp_target_port *target = ch->target; in srp_new_cm_id()
377 return target->using_rdma_cm ? srp_new_rdma_cm_id(ch) : in srp_new_cm_id()
382 * srp_destroy_fr_pool() - free the resources owned by a pool
393 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_destroy_fr_pool()
394 if (d->mr) in srp_destroy_fr_pool()
395 ib_dereg_mr(d->mr); in srp_destroy_fr_pool()
401 * srp_create_fr_pool() - allocate and initialize a pool for fast registration
414 int i, ret = -EINVAL; in srp_create_fr_pool()
419 ret = -ENOMEM; in srp_create_fr_pool()
423 pool->size = pool_size; in srp_create_fr_pool()
424 pool->max_page_list_len = max_page_list_len; in srp_create_fr_pool()
425 spin_lock_init(&pool->lock); in srp_create_fr_pool()
426 INIT_LIST_HEAD(&pool->free_list); in srp_create_fr_pool()
428 if (device->attrs.kernel_cap_flags & IBK_SG_GAPS_REG) in srp_create_fr_pool()
433 for (i = 0, d = &pool->desc[0]; i < pool->size; i++, d++) { in srp_create_fr_pool()
437 if (ret == -ENOMEM) in srp_create_fr_pool()
439 dev_name(&device->dev)); in srp_create_fr_pool()
442 d->mr = mr; in srp_create_fr_pool()
443 list_add_tail(&d->entry, &pool->free_list); in srp_create_fr_pool()
458 * srp_fr_pool_get() - obtain a descriptor suitable for fast registration
466 spin_lock_irqsave(&pool->lock, flags); in srp_fr_pool_get()
467 if (!list_empty(&pool->free_list)) { in srp_fr_pool_get()
468 d = list_first_entry(&pool->free_list, typeof(*d), entry); in srp_fr_pool_get()
469 list_del(&d->entry); in srp_fr_pool_get()
471 spin_unlock_irqrestore(&pool->lock, flags); in srp_fr_pool_get()
477 * srp_fr_pool_put() - put an FR descriptor back in the free list
483 * desc->mr->rkey before calling this function.
491 spin_lock_irqsave(&pool->lock, flags); in srp_fr_pool_put()
493 list_add(&desc[i]->entry, &pool->free_list); in srp_fr_pool_put()
494 spin_unlock_irqrestore(&pool->lock, flags); in srp_fr_pool_put()
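srp_fr_pool_get() and srp_fr_pool_put() above bracket every fast-registration cycle: a descriptor comes off the spinlock-protected free list, its MR backs one mapping, and it returns to the list once the rkey has been invalidated, as the comment at 483 requires. A usage sketch with error handling elided:

	struct srp_fr_desc *d;

	d = srp_fr_pool_get(ch->fr_pool);
	if (!d)
		return -ENOMEM;		/* pool exhausted */
	/* ... post IB_WR_REG_MR using d->mr, run the I/O,
	 *     then invalidate d->mr->rkey ... */
	srp_fr_pool_put(ch->fr_pool, &d, 1);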
499 struct srp_device *dev = target->srp_host->srp_dev; in srp_alloc_fr_pool()
501 return srp_create_fr_pool(dev->dev, dev->pd, target->mr_pool_size, in srp_alloc_fr_pool()
502 dev->max_pages_per_mr); in srp_alloc_fr_pool()
506 * srp_destroy_qp() - destroy an RDMA queue pair
515 spin_lock_irq(&ch->lock); in srp_destroy_qp()
516 ib_process_cq_direct(ch->send_cq, -1); in srp_destroy_qp()
517 spin_unlock_irq(&ch->lock); in srp_destroy_qp()
519 ib_drain_qp(ch->qp); in srp_destroy_qp()
520 ib_destroy_qp(ch->qp); in srp_destroy_qp()
525 struct srp_target_port *target = ch->target; in srp_create_ch_ib()
526 struct srp_device *dev = target->srp_host->srp_dev; in srp_create_ch_ib()
527 const struct ib_device_attr *attr = &dev->dev->attrs; in srp_create_ch_ib()
532 const int m = 1 + dev->use_fast_reg * target->mr_per_cmd * 2; in srp_create_ch_ib()
537 return -ENOMEM; in srp_create_ch_ib()
540 recv_cq = ib_alloc_cq(dev->dev, ch, target->queue_size + 1, in srp_create_ch_ib()
541 ch->comp_vector, IB_POLL_SOFTIRQ); in srp_create_ch_ib()
547 send_cq = ib_alloc_cq(dev->dev, ch, m * target->queue_size, in srp_create_ch_ib()
548 ch->comp_vector, IB_POLL_DIRECT); in srp_create_ch_ib()
554 init_attr->event_handler = srp_qp_event; in srp_create_ch_ib()
555 init_attr->cap.max_send_wr = m * target->queue_size; in srp_create_ch_ib()
556 init_attr->cap.max_recv_wr = target->queue_size + 1; in srp_create_ch_ib()
557 init_attr->cap.max_recv_sge = 1; in srp_create_ch_ib()
558 init_attr->cap.max_send_sge = min(SRP_MAX_SGE, attr->max_send_sge); in srp_create_ch_ib()
559 init_attr->sq_sig_type = IB_SIGNAL_REQ_WR; in srp_create_ch_ib()
560 init_attr->qp_type = IB_QPT_RC; in srp_create_ch_ib()
561 init_attr->send_cq = send_cq; in srp_create_ch_ib()
562 init_attr->recv_cq = recv_cq; in srp_create_ch_ib()
564 ch->max_imm_sge = min(init_attr->cap.max_send_sge - 1U, 255U); in srp_create_ch_ib()
566 if (target->using_rdma_cm) { in srp_create_ch_ib()
567 ret = rdma_create_qp(ch->rdma_cm.cm_id, dev->pd, init_attr); in srp_create_ch_ib()
568 qp = ch->rdma_cm.cm_id->qp; in srp_create_ch_ib()
570 qp = ib_create_qp(dev->pd, init_attr); in srp_create_ch_ib()
581 dev_name(&dev->dev->dev), ret); in srp_create_ch_ib()
585 if (dev->use_fast_reg) { in srp_create_ch_ib()
589 shost_printk(KERN_WARNING, target->scsi_host, PFX in srp_create_ch_ib()
595 if (ch->qp) in srp_create_ch_ib()
597 if (ch->recv_cq) in srp_create_ch_ib()
598 ib_free_cq(ch->recv_cq); in srp_create_ch_ib()
599 if (ch->send_cq) in srp_create_ch_ib()
600 ib_free_cq(ch->send_cq); in srp_create_ch_ib()
602 ch->qp = qp; in srp_create_ch_ib()
603 ch->recv_cq = recv_cq; in srp_create_ch_ib()
604 ch->send_cq = send_cq; in srp_create_ch_ib()
606 if (dev->use_fast_reg) { in srp_create_ch_ib()
607 if (ch->fr_pool) in srp_create_ch_ib()
608 srp_destroy_fr_pool(ch->fr_pool); in srp_create_ch_ib()
609 ch->fr_pool = fr_pool; in srp_create_ch_ib()
616 if (target->using_rdma_cm) in srp_create_ch_ib()
617 rdma_destroy_qp(ch->rdma_cm.cm_id); in srp_create_ch_ib()
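The multiplier m above sizes the send queue: with fast registration, each command can post registration and invalidation work requests for every memory region in addition to the SRP command itself. A worked example with assumed values:

	const int use_fast_reg = 1, mr_per_cmd = 2, queue_size = 64;	/* assumed */
	const int m = 1 + use_fast_reg * mr_per_cmd * 2;	/* = 5 WRs per command */
	const int max_send_wr = m * queue_size;			/* = 320 */
	const int max_recv_wr = queue_size + 1;			/* matches the recv CQ size */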
634 * invoked. Hence the ch->[rt]x_ring checks.
639 struct srp_device *dev = target->srp_host->srp_dev; in srp_free_ch_ib()
642 if (!ch->target) in srp_free_ch_ib()
645 if (target->using_rdma_cm) { in srp_free_ch_ib()
646 if (ch->rdma_cm.cm_id) { in srp_free_ch_ib()
647 rdma_destroy_id(ch->rdma_cm.cm_id); in srp_free_ch_ib()
648 ch->rdma_cm.cm_id = NULL; in srp_free_ch_ib()
651 if (ch->ib_cm.cm_id) { in srp_free_ch_ib()
652 ib_destroy_cm_id(ch->ib_cm.cm_id); in srp_free_ch_ib()
653 ch->ib_cm.cm_id = NULL; in srp_free_ch_ib()
658 if (!ch->qp) in srp_free_ch_ib()
661 if (dev->use_fast_reg) { in srp_free_ch_ib()
662 if (ch->fr_pool) in srp_free_ch_ib()
663 srp_destroy_fr_pool(ch->fr_pool); in srp_free_ch_ib()
667 ib_free_cq(ch->send_cq); in srp_free_ch_ib()
668 ib_free_cq(ch->recv_cq); in srp_free_ch_ib()
676 ch->target = NULL; in srp_free_ch_ib()
678 ch->qp = NULL; in srp_free_ch_ib()
679 ch->send_cq = ch->recv_cq = NULL; in srp_free_ch_ib()
681 if (ch->rx_ring) { in srp_free_ch_ib()
682 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
683 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_free_ch_ib()
684 kfree(ch->rx_ring); in srp_free_ch_ib()
685 ch->rx_ring = NULL; in srp_free_ch_ib()
687 if (ch->tx_ring) { in srp_free_ch_ib()
688 for (i = 0; i < target->queue_size; ++i) in srp_free_ch_ib()
689 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_free_ch_ib()
690 kfree(ch->tx_ring); in srp_free_ch_ib()
691 ch->tx_ring = NULL; in srp_free_ch_ib()
700 struct srp_target_port *target = ch->target; in srp_path_rec_completion()
702 ch->status = status; in srp_path_rec_completion()
704 shost_printk(KERN_ERR, target->scsi_host, in srp_path_rec_completion()
707 ch->ib_cm.path = *pathrec; in srp_path_rec_completion()
708 complete(&ch->done); in srp_path_rec_completion()
713 struct srp_target_port *target = ch->target; in srp_ib_lookup_path()
716 ch->ib_cm.path.numb_path = 1; in srp_ib_lookup_path()
718 init_completion(&ch->done); in srp_ib_lookup_path()
720 ch->ib_cm.path_query_id = ib_sa_path_rec_get(&srp_sa_client, in srp_ib_lookup_path()
721 target->srp_host->srp_dev->dev, in srp_ib_lookup_path()
722 target->srp_host->port, in srp_ib_lookup_path()
723 &ch->ib_cm.path, in srp_ib_lookup_path()
732 ch, &ch->ib_cm.path_query); in srp_ib_lookup_path()
733 if (ch->ib_cm.path_query_id < 0) in srp_ib_lookup_path()
734 return ch->ib_cm.path_query_id; in srp_ib_lookup_path()
736 ret = wait_for_completion_interruptible(&ch->done); in srp_ib_lookup_path()
740 if (ch->status < 0) in srp_ib_lookup_path()
741 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_lookup_path()
743 ch->ib_cm.path.sgid.raw, ch->ib_cm.path.dgid.raw, in srp_ib_lookup_path()
744 be16_to_cpu(target->ib_cm.pkey), in srp_ib_lookup_path()
745 be64_to_cpu(target->ib_cm.service_id)); in srp_ib_lookup_path()
747 return ch->status; in srp_ib_lookup_path()
752 struct srp_target_port *target = ch->target; in srp_rdma_lookup_path()
755 init_completion(&ch->done); in srp_rdma_lookup_path()
757 ret = rdma_resolve_route(ch->rdma_cm.cm_id, SRP_PATH_REC_TIMEOUT_MS); in srp_rdma_lookup_path()
761 wait_for_completion_interruptible(&ch->done); in srp_rdma_lookup_path()
763 if (ch->status != 0) in srp_rdma_lookup_path()
764 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_lookup_path()
767 return ch->status; in srp_rdma_lookup_path()
772 struct srp_target_port *target = ch->target; in srp_lookup_path()
774 return target->using_rdma_cm ? srp_rdma_lookup_path(ch) : in srp_lookup_path()
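Both lookup variants follow the same asynchronous shape: arm ch->done, start the query, wait interruptibly, and read back ch->status, which the completion handler (srp_path_rec_completion() above, or the RDMA CM event handler) fills in. Schematically, with start_async_query() as a hypothetical stand-in for ib_sa_path_rec_get() or rdma_resolve_route():

	init_completion(&ch->done);
	ret = start_async_query(ch);	/* hypothetical stand-in */
	if (ret < 0)
		return ret;
	ret = wait_for_completion_interruptible(&ch->done);
	if (ret < 0)
		return ret;		/* interrupted by a signal */
	return ch->status;		/* set by the completion handler */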
784 ret = ib_query_port(host->srp_dev->dev, host->port, &attr); in srp_get_subnet_timeout()
790 dev_name(&host->srp_dev->dev->dev), subnet_timeout); in srp_get_subnet_timeout()
798 struct srp_target_port *target = ch->target; in srp_send_req()
805 char *ipi, *tpi; in srp_send_req() local
810 return -ENOMEM; in srp_send_req()
812 req->ib_param.flow_control = 1; in srp_send_req()
813 req->ib_param.retry_count = target->tl_retry_count; in srp_send_req()
819 req->ib_param.responder_resources = 4; in srp_send_req()
820 req->ib_param.rnr_retry_count = 7; in srp_send_req()
821 req->ib_param.max_cm_retries = 15; in srp_send_req()
823 req->ib_req.opcode = SRP_LOGIN_REQ; in srp_send_req()
824 req->ib_req.tag = 0; in srp_send_req()
825 req->ib_req.req_it_iu_len = cpu_to_be32(max_iu_len); in srp_send_req()
826 req->ib_req.req_buf_fmt = cpu_to_be16(SRP_BUF_FORMAT_DIRECT | in srp_send_req()
828 req->ib_req.req_flags = (multich ? SRP_MULTICHAN_MULTI : in srp_send_req()
831 req->ib_req.req_flags |= SRP_IMMED_REQUESTED; in srp_send_req()
832 req->ib_req.imm_data_offset = cpu_to_be16(SRP_IMM_DATA_OFFSET); in srp_send_req()
835 if (target->using_rdma_cm) { in srp_send_req()
836 req->rdma_param.flow_control = req->ib_param.flow_control; in srp_send_req()
837 req->rdma_param.responder_resources = in srp_send_req()
838 req->ib_param.responder_resources; in srp_send_req()
839 req->rdma_param.initiator_depth = req->ib_param.initiator_depth; in srp_send_req()
840 req->rdma_param.retry_count = req->ib_param.retry_count; in srp_send_req()
841 req->rdma_param.rnr_retry_count = req->ib_param.rnr_retry_count; in srp_send_req()
842 req->rdma_param.private_data = &req->rdma_req; in srp_send_req()
843 req->rdma_param.private_data_len = sizeof(req->rdma_req); in srp_send_req()
845 req->rdma_req.opcode = req->ib_req.opcode; in srp_send_req()
846 req->rdma_req.tag = req->ib_req.tag; in srp_send_req()
847 req->rdma_req.req_it_iu_len = req->ib_req.req_it_iu_len; in srp_send_req()
848 req->rdma_req.req_buf_fmt = req->ib_req.req_buf_fmt; in srp_send_req()
849 req->rdma_req.req_flags = req->ib_req.req_flags; in srp_send_req()
850 req->rdma_req.imm_data_offset = req->ib_req.imm_data_offset; in srp_send_req()
852 ipi = req->rdma_req.initiator_port_id; in srp_send_req()
853 tpi = req->rdma_req.target_port_id; in srp_send_req()
857 subnet_timeout = srp_get_subnet_timeout(target->srp_host); in srp_send_req()
859 req->ib_param.primary_path = &ch->ib_cm.path; in srp_send_req()
860 req->ib_param.alternate_path = NULL; in srp_send_req()
861 req->ib_param.service_id = target->ib_cm.service_id; in srp_send_req()
862 get_random_bytes(&req->ib_param.starting_psn, 4); in srp_send_req()
863 req->ib_param.starting_psn &= 0xffffff; in srp_send_req()
864 req->ib_param.qp_num = ch->qp->qp_num; in srp_send_req()
865 req->ib_param.qp_type = ch->qp->qp_type; in srp_send_req()
866 req->ib_param.local_cm_response_timeout = subnet_timeout + 2; in srp_send_req()
867 req->ib_param.remote_cm_response_timeout = subnet_timeout + 2; in srp_send_req()
868 req->ib_param.private_data = &req->ib_req; in srp_send_req()
869 req->ib_param.private_data_len = sizeof(req->ib_req); in srp_send_req()
871 ipi = req->ib_req.initiator_port_id; in srp_send_req()
872 tpi = req->ib_req.target_port_id; in srp_send_req()
884 if (target->io_class == SRP_REV10_IB_IO_CLASS) { in srp_send_req()
885 memcpy(ipi, &target->sgid.global.interface_id, 8); in srp_send_req()
886 memcpy(ipi + 8, &target->initiator_ext, 8); in srp_send_req()
887 memcpy(tpi, &target->ioc_guid, 8); in srp_send_req()
888 memcpy(tpi + 8, &target->id_ext, 8); in srp_send_req()
890 memcpy(ipi, &target->initiator_ext, 8); in srp_send_req()
891 memcpy(ipi + 8, &target->sgid.global.interface_id, 8); in srp_send_req()
892 memcpy(tpi, &target->id_ext, 8); in srp_send_req()
893 memcpy(tpi + 8, &target->ioc_guid, 8); in srp_send_req()
902 shost_printk(KERN_DEBUG, target->scsi_host, in srp_send_req()
905 be64_to_cpu(target->ioc_guid)); in srp_send_req()
906 memset(ipi, 0, 8); in srp_send_req()
907 memcpy(ipi + 8, &target->srp_host->srp_dev->dev->node_guid, 8); in srp_send_req()
910 if (target->using_rdma_cm) in srp_send_req()
911 status = rdma_connect(ch->rdma_cm.cm_id, &req->rdma_param); in srp_send_req()
913 status = ib_send_cm_req(ch->ib_cm.cm_id, &req->ib_param); in srp_send_req()
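The local and remote CM response timeouts above use the InfiniBand exponent encoding, 4.096 us * 2^n, so subnet_timeout + 2 gives the peer four times the subnet timeout to answer. A worked example with an assumed subnet timeout of 18:

	unsigned int subnet_timeout = 18;		/* assumed: ~1.07 s */
	unsigned int cm_timeout = subnet_timeout + 2;	/* 20 -> ~4.3 s */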
924 spin_lock_irq(&target->lock); in srp_queue_remove_work()
925 if (target->state != SRP_TARGET_REMOVED) { in srp_queue_remove_work()
926 target->state = SRP_TARGET_REMOVED; in srp_queue_remove_work()
929 spin_unlock_irq(&target->lock); in srp_queue_remove_work()
932 queue_work(srp_remove_wq, &target->remove_work); in srp_queue_remove_work()
944 for (i = 0; i < target->ch_count; i++) { in srp_disconnect_target()
945 ch = &target->ch[i]; in srp_disconnect_target()
946 ch->connected = false; in srp_disconnect_target()
948 if (target->using_rdma_cm) { in srp_disconnect_target()
949 if (ch->rdma_cm.cm_id) in srp_disconnect_target()
950 rdma_disconnect(ch->rdma_cm.cm_id); in srp_disconnect_target()
952 if (ch->ib_cm.cm_id) in srp_disconnect_target()
953 ret = ib_send_cm_dreq(ch->ib_cm.cm_id, in srp_disconnect_target()
957 shost_printk(KERN_DEBUG, target->scsi_host, in srp_disconnect_target()
966 struct srp_device *dev = target->srp_host->srp_dev; in srp_exit_cmd_priv()
967 struct ib_device *ibdev = dev->dev; in srp_exit_cmd_priv()
970 kfree(req->fr_list); in srp_exit_cmd_priv()
971 if (req->indirect_dma_addr) { in srp_exit_cmd_priv()
972 ib_dma_unmap_single(ibdev, req->indirect_dma_addr, in srp_exit_cmd_priv()
973 target->indirect_size, in srp_exit_cmd_priv()
976 kfree(req->indirect_desc); in srp_exit_cmd_priv()
984 struct srp_device *srp_dev = target->srp_host->srp_dev; in srp_init_cmd_priv()
985 struct ib_device *ibdev = srp_dev->dev; in srp_init_cmd_priv()
988 int ret = -ENOMEM; in srp_init_cmd_priv()
990 if (srp_dev->use_fast_reg) { in srp_init_cmd_priv()
991 req->fr_list = kmalloc_array(target->mr_per_cmd, sizeof(void *), in srp_init_cmd_priv()
993 if (!req->fr_list) in srp_init_cmd_priv()
996 req->indirect_desc = kmalloc(target->indirect_size, GFP_KERNEL); in srp_init_cmd_priv()
997 if (!req->indirect_desc) in srp_init_cmd_priv()
1000 dma_addr = ib_dma_map_single(ibdev, req->indirect_desc, in srp_init_cmd_priv()
1001 target->indirect_size, in srp_init_cmd_priv()
1008 req->indirect_dma_addr = dma_addr; in srp_init_cmd_priv()
1016 * srp_del_scsi_host_attr() - Remove attributes defined in the host template.
1027 for (g = shost->hostt->shost_groups; *g; ++g) { in srp_del_scsi_host_attr()
1028 for (attr = (*g)->attrs; *attr; ++attr) { in srp_del_scsi_host_attr()
1032 device_remove_file(&shost->shost_dev, dev_attr); in srp_del_scsi_host_attr()
1042 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_target()
1044 srp_del_scsi_host_attr(target->scsi_host); in srp_remove_target()
1045 srp_rport_get(target->rport); in srp_remove_target()
1046 srp_remove_host(target->scsi_host); in srp_remove_target()
1047 scsi_remove_host(target->scsi_host); in srp_remove_target()
1048 srp_stop_rport_timers(target->rport); in srp_remove_target()
1050 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in srp_remove_target()
1051 for (i = 0; i < target->ch_count; i++) { in srp_remove_target()
1052 ch = &target->ch[i]; in srp_remove_target()
1055 cancel_work_sync(&target->tl_err_work); in srp_remove_target()
1056 srp_rport_put(target->rport); in srp_remove_target()
1057 kfree(target->ch); in srp_remove_target()
1058 target->ch = NULL; in srp_remove_target()
1060 spin_lock(&target->srp_host->target_lock); in srp_remove_target()
1061 list_del(&target->list); in srp_remove_target()
1062 spin_unlock(&target->srp_host->target_lock); in srp_remove_target()
1064 scsi_host_put(target->scsi_host); in srp_remove_target()
1072 WARN_ON_ONCE(target->state != SRP_TARGET_REMOVED); in srp_remove_work()
1079 struct srp_target_port *target = rport->lld_data; in srp_rport_delete()
1085 * srp_connected_ch() - number of connected channels
1092 for (i = 0; i < target->ch_count; i++) in srp_connected_ch()
1093 c += target->ch[i].connected; in srp_connected_ch()
1101 struct srp_target_port *target = ch->target; in srp_connect_ch()
1111 init_completion(&ch->done); in srp_connect_ch()
1115 ret = wait_for_completion_interruptible(&ch->done); in srp_connect_ch()
1125 ret = ch->status; in srp_connect_ch()
1128 ch->connected = true; in srp_connect_ch()
1141 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_connect_ch()
1143 ret = -ECONNRESET; in srp_connect_ch()
1152 return ret <= 0 ? ret : -ENODEV; in srp_connect_ch()
1171 wr.wr_cqe = &req->reg_cqe; in srp_inv_rkey()
1172 req->reg_cqe.done = srp_inv_rkey_err_done; in srp_inv_rkey()
1173 return ib_post_send(ch->qp, &wr, NULL); in srp_inv_rkey()
1180 struct srp_target_port *target = ch->target; in srp_unmap_data()
1181 struct srp_device *dev = target->srp_host->srp_dev; in srp_unmap_data()
1182 struct ib_device *ibdev = dev->dev; in srp_unmap_data()
1186 (scmnd->sc_data_direction != DMA_TO_DEVICE && in srp_unmap_data()
1187 scmnd->sc_data_direction != DMA_FROM_DEVICE)) in srp_unmap_data()
1190 if (dev->use_fast_reg) { in srp_unmap_data()
1193 for (i = req->nmdesc, pfr = req->fr_list; i > 0; i--, pfr++) { in srp_unmap_data()
1194 res = srp_inv_rkey(req, ch, (*pfr)->mr->rkey); in srp_unmap_data()
1196 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_unmap_data()
1198 (*pfr)->mr->rkey, res); in srp_unmap_data()
1200 &target->tl_err_work); in srp_unmap_data()
1203 if (req->nmdesc) in srp_unmap_data()
1204 srp_fr_pool_put(ch->fr_pool, req->fr_list, in srp_unmap_data()
1205 req->nmdesc); in srp_unmap_data()
1209 scmnd->sc_data_direction); in srp_unmap_data()
1213 * srp_claim_req - Take ownership of the scmnd associated with a request.
1217 * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
1218 * ownership of @req->scmnd if it equals @scmnd.
1230 spin_lock_irqsave(&ch->lock, flags); in srp_claim_req()
1231 if (req->scmnd && in srp_claim_req()
1232 (!sdev || req->scmnd->device == sdev) && in srp_claim_req()
1233 (!scmnd || req->scmnd == scmnd)) { in srp_claim_req()
1234 scmnd = req->scmnd; in srp_claim_req()
1235 req->scmnd = NULL; in srp_claim_req()
1239 spin_unlock_irqrestore(&ch->lock, flags); in srp_claim_req()
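The claim above is the serialization point between normal completion, aborts, and target removal: whichever caller clears req->scmnd under ch->lock owns the command, and every other caller sees NULL and backs off. A sketch of the calling convention, loosely mirroring srp_finish_req() further down (the DID_ABORT result is an assumption for this sketch):

	scmnd = srp_claim_req(ch, req, NULL, NULL);
	if (!scmnd)
		return;				/* another context owns completion */
	srp_free_req(ch, req, scmnd, 0);	/* unmap data, return req_lim credit */
	scmnd->result = DID_ABORT << 16;	/* assumed result */
	scsi_done(scmnd);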
1245 * srp_free_req() - Unmap data and adjust ch->req_lim.
1249 * @req_lim_delta: Amount to be added to @target->req_lim.
1258 spin_lock_irqsave(&ch->lock, flags); in srp_free_req()
1259 ch->req_lim += req_lim_delta; in srp_free_req()
1260 spin_unlock_irqrestore(&ch->lock, flags); in srp_free_req()
1270 scmnd->result = result; in srp_finish_req()
1283 struct srp_target_port *target = context->srp_target; in srp_terminate_cmd()
1285 struct srp_rdma_ch *ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_terminate_cmd()
1288 srp_finish_req(ch, req, NULL, context->scsi_result); in srp_terminate_cmd()
1295 struct srp_target_port *target = rport->lld_data; in srp_terminate_io()
1299 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, &context); in srp_terminate_io()
1328 * serializes calls of this function via rport->mutex and also blocks
1333 struct srp_target_port *target = rport->lld_data; in srp_rport_reconnect()
1335 uint32_t max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_rport_reconnect()
1337 target->max_it_iu_size); in srp_rport_reconnect()
1343 if (target->state == SRP_TARGET_SCANNING) in srp_rport_reconnect()
1344 return -ENODEV; in srp_rport_reconnect()
1351 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1352 ch = &target->ch[i]; in srp_rport_reconnect()
1359 scsi_host_busy_iter(target->scsi_host, srp_terminate_cmd, in srp_rport_reconnect()
1362 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1363 ch = &target->ch[i]; in srp_rport_reconnect()
1371 INIT_LIST_HEAD(&ch->free_tx); in srp_rport_reconnect()
1372 for (j = 0; j < target->queue_size; ++j) in srp_rport_reconnect()
1373 list_add(&ch->tx_ring[j]->list, &ch->free_tx); in srp_rport_reconnect()
1376 target->qp_in_error = false; in srp_rport_reconnect()
1378 for (i = 0; i < target->ch_count; i++) { in srp_rport_reconnect()
1379 ch = &target->ch[i]; in srp_rport_reconnect()
1387 shost_printk(KERN_INFO, target->scsi_host, in srp_rport_reconnect()
1396 struct srp_direct_buf *desc = state->desc; in srp_map_desc()
1400 desc->va = cpu_to_be64(dma_addr); in srp_map_desc()
1401 desc->key = cpu_to_be32(rkey); in srp_map_desc()
1402 desc->len = cpu_to_be32(dma_len); in srp_map_desc()
1404 state->total_len += dma_len; in srp_map_desc()
1405 state->desc++; in srp_map_desc()
1406 state->ndesc++; in srp_map_desc()
1415 * Map up to sg_nents elements of state->sg where *sg_offset_p is the offset
1417 * *sg_offset_p is updated to the offset in state->sg[retval] of the first
1425 struct srp_target_port *target = ch->target; in srp_map_finish_fr()
1426 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_finish_fr()
1432 if (state->fr.next >= state->fr.end) { in srp_map_finish_fr()
1433 shost_printk(KERN_ERR, ch->target->scsi_host, in srp_map_finish_fr()
1435 ch->target->mr_per_cmd); in srp_map_finish_fr()
1436 return -ENOMEM; in srp_map_finish_fr()
1439 WARN_ON_ONCE(!dev->use_fast_reg); in srp_map_finish_fr()
1441 if (sg_nents == 1 && target->global_rkey) { in srp_map_finish_fr()
1444 srp_map_desc(state, sg_dma_address(state->sg) + sg_offset, in srp_map_finish_fr()
1445 sg_dma_len(state->sg) - sg_offset, in srp_map_finish_fr()
1446 target->global_rkey); in srp_map_finish_fr()
1452 desc = srp_fr_pool_get(ch->fr_pool); in srp_map_finish_fr()
1454 return -ENOMEM; in srp_map_finish_fr()
1456 rkey = ib_inc_rkey(desc->mr->rkey); in srp_map_finish_fr()
1457 ib_update_fast_reg_key(desc->mr, rkey); in srp_map_finish_fr()
1459 n = ib_map_mr_sg(desc->mr, state->sg, sg_nents, sg_offset_p, in srp_map_finish_fr()
1460 dev->mr_page_size); in srp_map_finish_fr()
1462 srp_fr_pool_put(ch->fr_pool, &desc, 1); in srp_map_finish_fr()
1464 dev_name(&req->scmnd->device->sdev_gendev), sg_nents, in srp_map_finish_fr()
1465 sg_offset_p ? *sg_offset_p : -1, n); in srp_map_finish_fr()
1469 WARN_ON_ONCE(desc->mr->length == 0); in srp_map_finish_fr()
1471 req->reg_cqe.done = srp_reg_mr_err_done; in srp_map_finish_fr()
1475 wr.wr.wr_cqe = &req->reg_cqe; in srp_map_finish_fr()
1478 wr.mr = desc->mr; in srp_map_finish_fr()
1479 wr.key = desc->mr->rkey; in srp_map_finish_fr()
1484 *state->fr.next++ = desc; in srp_map_finish_fr()
1485 state->nmdesc++; in srp_map_finish_fr()
1487 srp_map_desc(state, desc->mr->iova, in srp_map_finish_fr()
1488 desc->mr->length, desc->mr->rkey); in srp_map_finish_fr()
1490 err = ib_post_send(ch->qp, &wr.wr, NULL); in srp_map_finish_fr()
1492 WARN_ON_ONCE(err == -ENOMEM); in srp_map_finish_fr()
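srp_map_finish_fr() above is the standard verbs fast-registration sequence: bump the rkey so a stale remote access faults, map the scatterlist into the MR's page list, and post an IB_WR_REG_MR. A self-contained sketch, assuming an already-allocated MR and a DMA-mapped scatterlist:

	static int post_fast_reg(struct ib_qp *qp, struct ib_mr *mr,
				 struct scatterlist *sg, int sg_nents)
	{
		struct ib_reg_wr wr = {};
		int n;

		/* Increment the key byte so a stale rkey cannot be reused. */
		ib_update_fast_reg_key(mr, ib_inc_rkey(mr->rkey));

		n = ib_map_mr_sg(mr, sg, sg_nents, NULL, PAGE_SIZE);
		if (n < 0)
			return n;	/* mapping failed or was partial */

		wr.wr.opcode = IB_WR_REG_MR;
		wr.mr = mr;
		wr.key = mr->rkey;
		wr.access = IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_READ |
			    IB_ACCESS_REMOTE_WRITE;
		return ib_post_send(qp, &wr.wr, NULL);
	}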
1505 state->fr.next = req->fr_list; in srp_map_sg_fr()
1506 state->fr.end = req->fr_list + ch->target->mr_per_cmd; in srp_map_sg_fr()
1507 state->sg = scat; in srp_map_sg_fr()
1519 count -= n; in srp_map_sg_fr()
1521 state->sg = sg_next(state->sg); in srp_map_sg_fr()
1531 struct srp_target_port *target = ch->target; in srp_map_sg_dma()
1537 target->global_rkey); in srp_map_sg_dma()
1554 struct srp_target_port *target = ch->target; in srp_map_idb()
1555 struct srp_device *dev = target->srp_host->srp_dev; in srp_map_idb()
1566 state.base_dma_addr = req->indirect_dma_addr; in srp_map_idb()
1569 if (dev->use_fast_reg) { in srp_map_idb()
1571 sg_init_one(idb_sg, req->indirect_desc, idb_len); in srp_map_idb()
1572 idb_sg->dma_address = req->indirect_dma_addr; /* hack! */ in srp_map_idb()
1574 idb_sg->dma_length = idb_sg->length; /* hack^2 */ in srp_map_idb()
1581 return -EINVAL; in srp_map_idb()
1593 struct srp_device *dev = ch->target->srp_host->srp_dev; in srp_check_mapping()
1598 for (i = 0; i < state->ndesc; i++) in srp_check_mapping()
1599 desc_len += be32_to_cpu(req->indirect_desc[i].len); in srp_check_mapping()
1600 if (dev->use_fast_reg) in srp_check_mapping()
1601 for (i = 0, pfr = req->fr_list; i < state->nmdesc; i++, pfr++) in srp_check_mapping()
1602 mr_len += (*pfr)->mr->length; in srp_check_mapping()
1603 if (desc_len != scsi_bufflen(req->scmnd) || in srp_check_mapping()
1604 mr_len > scsi_bufflen(req->scmnd)) in srp_check_mapping()
1606 scsi_bufflen(req->scmnd), desc_len, mr_len, in srp_check_mapping()
1607 state->ndesc, state->nmdesc); in srp_check_mapping()
1611 * srp_map_data() - map SCSI data buffer onto an SRP request
1623 struct srp_target_port *target = ch->target; in srp_map_data()
1625 struct srp_cmd *cmd = req->cmd->buf; in srp_map_data()
1636 req->cmd->num_sge = 1; in srp_map_data()
1638 if (!scsi_sglist(scmnd) || scmnd->sc_data_direction == DMA_NONE) in srp_map_data()
1639 return sizeof(struct srp_cmd) + cmd->add_cdb_len; in srp_map_data()
1641 if (scmnd->sc_data_direction != DMA_FROM_DEVICE && in srp_map_data()
1642 scmnd->sc_data_direction != DMA_TO_DEVICE) { in srp_map_data()
1643 shost_printk(KERN_WARNING, target->scsi_host, in srp_map_data()
1645 scmnd->sc_data_direction); in srp_map_data()
1646 return -EINVAL; in srp_map_data()
1653 dev = target->srp_host->srp_dev; in srp_map_data()
1654 ibdev = dev->dev; in srp_map_data()
1656 count = ib_dma_map_sg(ibdev, scat, nents, scmnd->sc_data_direction); in srp_map_data()
1658 return -EIO; in srp_map_data()
1660 if (ch->use_imm_data && in srp_map_data()
1661 count <= ch->max_imm_sge && in srp_map_data()
1662 SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len && in srp_map_data()
1663 scmnd->sc_data_direction == DMA_TO_DEVICE) { in srp_map_data()
1665 struct ib_sge *sge = &req->cmd->sge[1]; in srp_map_data()
1669 req->nmdesc = 0; in srp_map_data()
1670 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1671 buf->len = cpu_to_be32(data_len); in srp_map_data()
1676 sge[i].lkey = target->lkey; in srp_map_data()
1678 req->cmd->num_sge += count; in srp_map_data()
1683 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + in srp_map_data()
1686 if (count == 1 && target->global_rkey) { in srp_map_data()
1695 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1696 buf->va = cpu_to_be64(sg_dma_address(scat)); in srp_map_data()
1697 buf->key = cpu_to_be32(target->global_rkey); in srp_map_data()
1698 buf->len = cpu_to_be32(sg_dma_len(scat)); in srp_map_data()
1700 req->nmdesc = 0; in srp_map_data()
1708 indirect_hdr = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1710 ib_dma_sync_single_for_cpu(ibdev, req->indirect_dma_addr, in srp_map_data()
1711 target->indirect_size, DMA_TO_DEVICE); in srp_map_data()
1714 state.desc = req->indirect_desc; in srp_map_data()
1715 if (dev->use_fast_reg) in srp_map_data()
1719 req->nmdesc = state.nmdesc; in srp_map_data()
1738 * Memory registration collapsed the sg-list into one entry, in srp_map_data()
1743 buf = (void *)cmd->add_data + cmd->add_cdb_len; in srp_map_data()
1744 *buf = req->indirect_desc[0]; in srp_map_data()
1748 if (unlikely(target->cmd_sg_cnt < state.ndesc && in srp_map_data()
1749 !target->allow_ext_sg)) { in srp_map_data()
1750 shost_printk(KERN_ERR, target->scsi_host, in srp_map_data()
1752 ret = -EIO; in srp_map_data()
1756 count = min(state.ndesc, target->cmd_sg_cnt); in srp_map_data()
1761 len = sizeof(struct srp_cmd) + cmd->add_cdb_len + in srp_map_data()
1765 memcpy(indirect_hdr->desc_list, req->indirect_desc, in srp_map_data()
1768 if (!target->global_rkey) { in srp_map_data()
1773 req->nmdesc++; in srp_map_data()
1775 idb_rkey = cpu_to_be32(target->global_rkey); in srp_map_data()
1778 indirect_hdr->table_desc.va = cpu_to_be64(req->indirect_dma_addr); in srp_map_data()
1779 indirect_hdr->table_desc.key = idb_rkey; in srp_map_data()
1780 indirect_hdr->table_desc.len = cpu_to_be32(table_len); in srp_map_data()
1781 indirect_hdr->len = cpu_to_be32(state.total_len); in srp_map_data()
1783 if (scmnd->sc_data_direction == DMA_TO_DEVICE) in srp_map_data()
1784 cmd->data_out_desc_cnt = count; in srp_map_data()
1786 cmd->data_in_desc_cnt = count; in srp_map_data()
1788 ib_dma_sync_single_for_device(ibdev, req->indirect_dma_addr, table_len, in srp_map_data()
1792 if (scmnd->sc_data_direction == DMA_TO_DEVICE) in srp_map_data()
1793 cmd->buf_fmt = fmt << 4; in srp_map_data()
1795 cmd->buf_fmt = fmt; in srp_map_data()
1801 if (ret == -ENOMEM && req->nmdesc >= target->mr_pool_size) in srp_map_data()
1802 ret = -E2BIG; in srp_map_data()
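srp_map_data() above picks between three SRP buffer formats in decreasing order of efficiency: immediate data carried inside the IU, a single direct descriptor when one mapped entry plus a global rkey suffice, and an indirect descriptor table otherwise. The selection logic, condensed into one hypothetical helper (descriptor-format constants are from include/scsi/srp.h):

	static unsigned int srp_choose_fmt(struct srp_rdma_ch *ch,
					   struct scsi_cmnd *scmnd, int count,
					   u32 data_len)
	{
		if (ch->use_imm_data && count <= ch->max_imm_sge &&
		    SRP_IMM_DATA_OFFSET + data_len <= ch->max_it_iu_len &&
		    scmnd->sc_data_direction == DMA_TO_DEVICE)
			return SRP_DATA_DESC_IMM;	/* payload rides in the IU */
		if (count == 1 && ch->target->global_rkey)
			return SRP_DATA_DESC_DIRECT;	/* one srp_direct_buf */
		return SRP_DATA_DESC_INDIRECT;		/* descriptor table */
	}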
1814 spin_lock_irqsave(&ch->lock, flags); in srp_put_tx_iu()
1815 list_add(&iu->list, &ch->free_tx); in srp_put_tx_iu()
1817 ++ch->req_lim; in srp_put_tx_iu()
1818 spin_unlock_irqrestore(&ch->lock, flags); in srp_put_tx_iu()
1822 * Must be called with ch->lock held to protect req_lim and free_tx.
1828 * - SRP_IU_CMD: SRP_CMD_SQ_SIZE, since the SCSI mid-layer never queues
1830 * - SRP_IU_TSK_MGMT: SRP_TSK_MGMT_SQ_SIZE.
1831 * - SRP_IU_RSP: 1, since a conforming SRP target never sends more than
1837 struct srp_target_port *target = ch->target; in __srp_get_tx_iu()
1841 lockdep_assert_held(&ch->lock); in __srp_get_tx_iu()
1843 ib_process_cq_direct(ch->send_cq, -1); in __srp_get_tx_iu()
1845 if (list_empty(&ch->free_tx)) in __srp_get_tx_iu()
1850 if (ch->req_lim <= rsv) { in __srp_get_tx_iu()
1851 ++target->zero_req_lim; in __srp_get_tx_iu()
1855 --ch->req_lim; in __srp_get_tx_iu()
1858 iu = list_first_entry(&ch->free_tx, struct srp_iu, list); in __srp_get_tx_iu()
1859 list_del(&iu->list); in __srp_get_tx_iu()
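The rsv check above keeps headroom in req_lim so a task-management IU can always be sent: ordinary commands must leave SRP_TSK_MGMT_SQ_SIZE credits in reserve, task-management requests reserve nothing extra, and SRP_IU_RSP bypasses the accounting because a conforming target guarantees room for one response. Condensed into a sketch consistent with the matched lines (the real check sits inline in __srp_get_tx_iu()):

	s32 rsv = (iu_type == SRP_IU_TSK_MGMT) ? 0 : SRP_TSK_MGMT_SQ_SIZE;

	if (iu_type != SRP_IU_RSP) {	/* responses consume no credits */
		if (ch->req_lim <= rsv)
			return NULL;	/* counted in target->zero_req_lim */
		--ch->req_lim;
	}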
1865 * be called without ch->lock being held. If ib_drain_sq() dequeues a WQE
1870 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_send_done()
1871 struct srp_rdma_ch *ch = cq->cq_context; in srp_send_done()
1873 if (unlikely(wc->status != IB_WC_SUCCESS)) { in srp_send_done()
1878 lockdep_assert_held(&ch->lock); in srp_send_done()
1880 list_add(&iu->list, &ch->free_tx); in srp_send_done()
1884 * srp_post_send() - send an SRP information unit
1891 struct srp_target_port *target = ch->target; in srp_post_send()
1894 if (WARN_ON_ONCE(iu->num_sge > SRP_MAX_SGE)) in srp_post_send()
1895 return -EINVAL; in srp_post_send()
1897 iu->sge[0].addr = iu->dma; in srp_post_send()
1898 iu->sge[0].length = len; in srp_post_send()
1899 iu->sge[0].lkey = target->lkey; in srp_post_send()
1901 iu->cqe.done = srp_send_done; in srp_post_send()
1904 wr.wr_cqe = &iu->cqe; in srp_post_send()
1905 wr.sg_list = &iu->sge[0]; in srp_post_send()
1906 wr.num_sge = iu->num_sge; in srp_post_send()
1910 return ib_post_send(ch->qp, &wr, NULL); in srp_post_send()
1915 struct srp_target_port *target = ch->target; in srp_post_recv()
1919 list.addr = iu->dma; in srp_post_recv()
1920 list.length = iu->size; in srp_post_recv()
1921 list.lkey = target->lkey; in srp_post_recv()
1923 iu->cqe.done = srp_recv_done; in srp_post_recv()
1926 wr.wr_cqe = &iu->cqe; in srp_post_recv()
1930 return ib_post_recv(ch->qp, &wr, NULL); in srp_post_recv()
1935 struct srp_target_port *target = ch->target; in srp_process_rsp()
1940 if (unlikely(rsp->tag & SRP_TAG_TSK_MGMT)) { in srp_process_rsp()
1941 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1942 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1943 if (rsp->tag == ch->tsk_mgmt_tag) { in srp_process_rsp()
1944 ch->tsk_mgmt_status = -1; in srp_process_rsp()
1945 if (be32_to_cpu(rsp->resp_data_len) >= 4) in srp_process_rsp()
1946 ch->tsk_mgmt_status = rsp->data[3]; in srp_process_rsp()
1947 complete(&ch->tsk_mgmt_done); in srp_process_rsp()
1949 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1951 rsp->tag); in srp_process_rsp()
1953 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1955 scmnd = scsi_host_find_tag(target->scsi_host, rsp->tag); in srp_process_rsp()
1961 shost_printk(KERN_ERR, target->scsi_host, in srp_process_rsp()
1963 rsp->tag, ch - target->ch, ch->qp->qp_num); in srp_process_rsp()
1965 spin_lock_irqsave(&ch->lock, flags); in srp_process_rsp()
1966 ch->req_lim += be32_to_cpu(rsp->req_lim_delta); in srp_process_rsp()
1967 spin_unlock_irqrestore(&ch->lock, flags); in srp_process_rsp()
1971 scmnd->result = rsp->status; in srp_process_rsp()
1973 if (rsp->flags & SRP_RSP_FLAG_SNSVALID) { in srp_process_rsp()
1974 memcpy(scmnd->sense_buffer, rsp->data + in srp_process_rsp()
1975 be32_to_cpu(rsp->resp_data_len), in srp_process_rsp()
1976 min_t(int, be32_to_cpu(rsp->sense_data_len), in srp_process_rsp()
1980 if (unlikely(rsp->flags & SRP_RSP_FLAG_DIUNDER)) in srp_process_rsp()
1981 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt)); in srp_process_rsp()
1982 else if (unlikely(rsp->flags & SRP_RSP_FLAG_DOUNDER)) in srp_process_rsp()
1983 scsi_set_resid(scmnd, be32_to_cpu(rsp->data_out_res_cnt)); in srp_process_rsp()
1986 be32_to_cpu(rsp->req_lim_delta)); in srp_process_rsp()
1995 struct srp_target_port *target = ch->target; in srp_response_common()
1996 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_response_common()
2001 spin_lock_irqsave(&ch->lock, flags); in srp_response_common()
2002 ch->req_lim += req_delta; in srp_response_common()
2004 spin_unlock_irqrestore(&ch->lock, flags); in srp_response_common()
2007 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2012 iu->num_sge = 1; in srp_response_common()
2013 ib_dma_sync_single_for_cpu(dev, iu->dma, len, DMA_TO_DEVICE); in srp_response_common()
2014 memcpy(iu->buf, rsp, len); in srp_response_common()
2015 ib_dma_sync_single_for_device(dev, iu->dma, len, DMA_TO_DEVICE); in srp_response_common()
2019 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_response_common()
2032 .tag = req->tag, in srp_process_cred_req()
2034 s32 delta = be32_to_cpu(req->req_lim_delta); in srp_process_cred_req()
2037 shost_printk(KERN_ERR, ch->target->scsi_host, PFX in srp_process_cred_req()
2044 struct srp_target_port *target = ch->target; in srp_process_aer_req()
2047 .tag = req->tag, in srp_process_aer_req()
2049 s32 delta = be32_to_cpu(req->req_lim_delta); in srp_process_aer_req()
2051 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2052 "ignoring AER for LUN %llu\n", scsilun_to_int(&req->lun)); in srp_process_aer_req()
2055 shost_printk(KERN_ERR, target->scsi_host, PFX in srp_process_aer_req()
2061 struct srp_iu *iu = container_of(wc->wr_cqe, struct srp_iu, cqe); in srp_recv_done()
2062 struct srp_rdma_ch *ch = cq->cq_context; in srp_recv_done()
2063 struct srp_target_port *target = ch->target; in srp_recv_done()
2064 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_recv_done()
2068 if (unlikely(wc->status != IB_WC_SUCCESS)) { in srp_recv_done()
2073 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2076 opcode = *(u8 *) iu->buf; in srp_recv_done()
2079 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2082 iu->buf, wc->byte_len, true); in srp_recv_done()
2087 srp_process_rsp(ch, iu->buf); in srp_recv_done()
2091 srp_process_cred_req(ch, iu->buf); in srp_recv_done()
2095 srp_process_aer_req(ch, iu->buf); in srp_recv_done()
2100 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2105 shost_printk(KERN_WARNING, target->scsi_host, in srp_recv_done()
2110 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_ti_iu_len, in srp_recv_done()
2115 shost_printk(KERN_ERR, target->scsi_host, in srp_recv_done()
2120 * srp_tl_err_work() - handle a transport layer error
2124 * hence the target->rport test.
2131 if (target->rport) in srp_tl_err_work()
2132 srp_start_tl_fail_timers(target->rport); in srp_tl_err_work()
2138 struct srp_rdma_ch *ch = cq->cq_context; in srp_handle_qp_err()
2139 struct srp_target_port *target = ch->target; in srp_handle_qp_err()
2141 if (ch->connected && !target->qp_in_error) { in srp_handle_qp_err()
2142 shost_printk(KERN_ERR, target->scsi_host, in srp_handle_qp_err()
2144 opname, ib_wc_status_msg(wc->status), wc->status, in srp_handle_qp_err()
2145 wc->wr_cqe); in srp_handle_qp_err()
2146 queue_work(system_long_wq, &target->tl_err_work); in srp_handle_qp_err()
2148 target->qp_in_error = true; in srp_handle_qp_err()
2164 scmnd->result = srp_chkready(target->rport); in srp_queuecommand()
2165 if (unlikely(scmnd->result)) in srp_queuecommand()
2168 WARN_ON_ONCE(rq->tag < 0); in srp_queuecommand()
2170 ch = &target->ch[blk_mq_unique_tag_to_hwq(tag)]; in srp_queuecommand()
2172 spin_lock_irqsave(&ch->lock, flags); in srp_queuecommand()
2174 spin_unlock_irqrestore(&ch->lock, flags); in srp_queuecommand()
2179 dev = target->srp_host->srp_dev->dev; in srp_queuecommand()
2180 ib_dma_sync_single_for_cpu(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2183 cmd = iu->buf; in srp_queuecommand()
2186 cmd->opcode = SRP_CMD; in srp_queuecommand()
2187 int_to_scsilun(scmnd->device->lun, &cmd->lun); in srp_queuecommand()
2188 cmd->tag = tag; in srp_queuecommand()
2189 memcpy(cmd->cdb, scmnd->cmnd, scmnd->cmd_len); in srp_queuecommand()
2190 if (unlikely(scmnd->cmd_len > sizeof(cmd->cdb))) { in srp_queuecommand()
2191 cmd->add_cdb_len = round_up(scmnd->cmd_len - sizeof(cmd->cdb), in srp_queuecommand()
2193 if (WARN_ON_ONCE(cmd->add_cdb_len > SRP_MAX_ADD_CDB_LEN)) in srp_queuecommand()
2197 req->scmnd = scmnd; in srp_queuecommand()
2198 req->cmd = iu; in srp_queuecommand()
2202 shost_printk(KERN_ERR, target->scsi_host, in srp_queuecommand()
2205 * If we ran out of memory descriptors (-ENOMEM) because an in srp_queuecommand()
2207 * max_pages_per_mr sg-list elements, tell the SCSI mid-layer in srp_queuecommand()
2210 scmnd->result = len == -ENOMEM ? in srp_queuecommand()
2215 ib_dma_sync_single_for_device(dev, iu->dma, ch->max_it_iu_len, in srp_queuecommand()
2219 shost_printk(KERN_ERR, target->scsi_host, PFX "Send failed\n"); in srp_queuecommand()
2220 scmnd->result = DID_ERROR << 16; in srp_queuecommand()
2236 req->scmnd = NULL; in srp_queuecommand()
2239 if (scmnd->result) { in srp_queuecommand()
2255 struct srp_target_port *target = ch->target; in srp_alloc_iu_bufs()
2258 ch->rx_ring = kcalloc(target->queue_size, sizeof(*ch->rx_ring), in srp_alloc_iu_bufs()
2260 if (!ch->rx_ring) in srp_alloc_iu_bufs()
2262 ch->tx_ring = kcalloc(target->queue_size, sizeof(*ch->tx_ring), in srp_alloc_iu_bufs()
2264 if (!ch->tx_ring) in srp_alloc_iu_bufs()
2267 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2268 ch->rx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2269 ch->max_ti_iu_len, in srp_alloc_iu_bufs()
2271 if (!ch->rx_ring[i]) in srp_alloc_iu_bufs()
2275 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2276 ch->tx_ring[i] = srp_alloc_iu(target->srp_host, in srp_alloc_iu_bufs()
2277 ch->max_it_iu_len, in srp_alloc_iu_bufs()
2279 if (!ch->tx_ring[i]) in srp_alloc_iu_bufs()
2282 list_add(&ch->tx_ring[i]->list, &ch->free_tx); in srp_alloc_iu_bufs()
2288 for (i = 0; i < target->queue_size; ++i) { in srp_alloc_iu_bufs()
2289 srp_free_iu(target->srp_host, ch->rx_ring[i]); in srp_alloc_iu_bufs()
2290 srp_free_iu(target->srp_host, ch->tx_ring[i]); in srp_alloc_iu_bufs()
2295 kfree(ch->tx_ring); in srp_alloc_iu_bufs()
2296 ch->tx_ring = NULL; in srp_alloc_iu_bufs()
2297 kfree(ch->rx_ring); in srp_alloc_iu_bufs()
2298 ch->rx_ring = NULL; in srp_alloc_iu_bufs()
2300 return -ENOMEM; in srp_alloc_iu_bufs()
2317 * Set target->rq_tmo_jiffies to one second more than the largest time in srp_compute_rq_tmo()
2319 * C9-140..142 in the IBTA spec for more information about how to in srp_compute_rq_tmo()
2322 T_tr_ns = 4096 * (1ULL << qp_attr->timeout); in srp_compute_rq_tmo()
2323 max_compl_time_ms = qp_attr->retry_cnt * 4 * T_tr_ns; in srp_compute_rq_tmo()
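The fragment above follows the IBTA C9-140..142 guidance: the transport retry interval is 4096 ns shifted by the negotiated QP timeout exponent, and the worst case spans retry_cnt retries of up to 4 * T_tr each (the ns-to-ms conversion sits on a source line the grep did not match). Worked with assumed values:

	u64 T_tr_ns = 4096 * (1ULL << 14);	/* timeout exponent 14 -> ~67 ms */
	u32 max_compl_time_ms = 7 * 4 * T_tr_ns / NSEC_PER_MSEC;
						/* retry_cnt 7 -> ~1880 ms;
						 * rq_tmo_jiffies adds 1 s on top */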
2334 struct srp_target_port *target = ch->target; in srp_cm_rep_handler()
2340 if (lrsp->opcode == SRP_LOGIN_RSP) { in srp_cm_rep_handler()
2341 ch->max_ti_iu_len = be32_to_cpu(lrsp->max_ti_iu_len); in srp_cm_rep_handler()
2342 ch->req_lim = be32_to_cpu(lrsp->req_lim_delta); in srp_cm_rep_handler()
2343 ch->use_imm_data = srp_use_imm_data && in srp_cm_rep_handler()
2344 (lrsp->rsp_flags & SRP_LOGIN_RSP_IMMED_SUPP); in srp_cm_rep_handler()
2345 ch->max_it_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in srp_cm_rep_handler()
2346 ch->use_imm_data, in srp_cm_rep_handler()
2347 target->max_it_iu_size); in srp_cm_rep_handler()
2348 WARN_ON_ONCE(ch->max_it_iu_len > in srp_cm_rep_handler()
2349 be32_to_cpu(lrsp->max_it_iu_len)); in srp_cm_rep_handler()
2351 if (ch->use_imm_data) in srp_cm_rep_handler()
2352 shost_printk(KERN_DEBUG, target->scsi_host, in srp_cm_rep_handler()
2357 * bounce requests back to the SCSI mid-layer. in srp_cm_rep_handler()
2359 target->scsi_host->can_queue in srp_cm_rep_handler()
2360 = min(ch->req_lim - SRP_TSK_MGMT_SQ_SIZE, in srp_cm_rep_handler()
2361 target->scsi_host->can_queue); in srp_cm_rep_handler()
2362 target->scsi_host->cmd_per_lun in srp_cm_rep_handler()
2363 = min_t(int, target->scsi_host->can_queue, in srp_cm_rep_handler()
2364 target->scsi_host->cmd_per_lun); in srp_cm_rep_handler()
2366 shost_printk(KERN_WARNING, target->scsi_host, in srp_cm_rep_handler()
2367 PFX "Unhandled RSP opcode %#x\n", lrsp->opcode); in srp_cm_rep_handler()
2368 ret = -ECONNRESET; in srp_cm_rep_handler()
2372 if (!ch->rx_ring) { in srp_cm_rep_handler()
2378 for (i = 0; i < target->queue_size; i++) { in srp_cm_rep_handler()
2379 struct srp_iu *iu = ch->rx_ring[i]; in srp_cm_rep_handler()
2386 if (!target->using_rdma_cm) { in srp_cm_rep_handler()
2387 ret = -ENOMEM; in srp_cm_rep_handler()
2392 qp_attr->qp_state = IB_QPS_RTR; in srp_cm_rep_handler()
2397 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2401 qp_attr->qp_state = IB_QPS_RTS; in srp_cm_rep_handler()
2406 target->rq_tmo_jiffies = srp_compute_rq_tmo(qp_attr, attr_mask); in srp_cm_rep_handler()
2408 ret = ib_modify_qp(ch->qp, qp_attr, attr_mask); in srp_cm_rep_handler()
2419 ch->status = ret; in srp_cm_rep_handler()
2426 struct srp_target_port *target = ch->target; in srp_ib_cm_rej_handler()
2427 struct Scsi_Host *shost = target->scsi_host; in srp_ib_cm_rej_handler()
2432 switch (event->param.rej_rcvd.reason) { in srp_ib_cm_rej_handler()
2434 cpi = event->param.rej_rcvd.ari; in srp_ib_cm_rej_handler()
2435 dlid = be16_to_cpu(cpi->redirect_lid); in srp_ib_cm_rej_handler()
2436 sa_path_set_dlid(&ch->ib_cm.path, dlid); in srp_ib_cm_rej_handler()
2437 ch->ib_cm.path.pkey = cpi->redirect_pkey; in srp_ib_cm_rej_handler()
2438 cm_id->remote_cm_qpn = be32_to_cpu(cpi->redirect_qp) & 0x00ffffff; in srp_ib_cm_rej_handler()
2439 memcpy(ch->ib_cm.path.dgid.raw, cpi->redirect_gid, 16); in srp_ib_cm_rej_handler()
2441 ch->status = dlid ? SRP_DLID_REDIRECT : SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2446 union ib_gid *dgid = &ch->ib_cm.path.dgid; in srp_ib_cm_rej_handler()
2453 memcpy(dgid->raw, event->param.rej_rcvd.ari, 16); in srp_ib_cm_rej_handler()
2457 be64_to_cpu(dgid->global.subnet_prefix), in srp_ib_cm_rej_handler()
2458 be64_to_cpu(dgid->global.interface_id)); in srp_ib_cm_rej_handler()
2460 ch->status = SRP_PORT_REDIRECT; in srp_ib_cm_rej_handler()
2464 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2471 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2475 opcode = *(u8 *) event->private_data; in srp_ib_cm_rej_handler()
2477 struct srp_login_rej *rej = event->private_data; in srp_ib_cm_rej_handler()
2478 u32 reason = be32_to_cpu(rej->reason); in srp_ib_cm_rej_handler()
2486 target->sgid.raw, in srp_ib_cm_rej_handler()
2487 target->ib_cm.orig_dgid.raw, in srp_ib_cm_rej_handler()
2493 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2498 ch->status = SRP_STALE_CONN; in srp_ib_cm_rej_handler()
2503 event->param.rej_rcvd.reason); in srp_ib_cm_rej_handler()
2504 ch->status = -ECONNRESET; in srp_ib_cm_rej_handler()
2511 struct srp_rdma_ch *ch = cm_id->context; in srp_ib_cm_handler()
2512 struct srp_target_port *target = ch->target; in srp_ib_cm_handler()
2515 switch (event->event) { in srp_ib_cm_handler()
2517 shost_printk(KERN_DEBUG, target->scsi_host, in srp_ib_cm_handler()
2520 ch->status = -ECONNRESET; in srp_ib_cm_handler()
2525 srp_cm_rep_handler(cm_id, event->private_data, ch); in srp_ib_cm_handler()
2529 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_ib_cm_handler()
2536 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2537 PFX "DREQ received - connection closed\n"); in srp_ib_cm_handler()
2538 ch->connected = false; in srp_ib_cm_handler()
2540 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2542 queue_work(system_long_wq, &target->tl_err_work); in srp_ib_cm_handler()
2546 shost_printk(KERN_ERR, target->scsi_host, in srp_ib_cm_handler()
2550 ch->status = 0; in srp_ib_cm_handler()
2559 shost_printk(KERN_WARNING, target->scsi_host, in srp_ib_cm_handler()
2560 PFX "Unhandled CM event %d\n", event->event); in srp_ib_cm_handler()
2565 complete(&ch->done); in srp_ib_cm_handler()
2573 struct srp_target_port *target = ch->target; in srp_rdma_cm_rej_handler()
2574 struct Scsi_Host *shost = target->scsi_host; in srp_rdma_cm_rej_handler()
2577 switch (event->status) { in srp_rdma_cm_rej_handler()
2581 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2585 opcode = *(u8 *) event->param.conn.private_data; in srp_rdma_cm_rej_handler()
2589 event->param.conn.private_data; in srp_rdma_cm_rej_handler()
2590 u32 reason = be32_to_cpu(rej->reason); in srp_rdma_cm_rej_handler()
2603 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2609 ch->status = SRP_STALE_CONN; in srp_rdma_cm_rej_handler()
2614 event->status); in srp_rdma_cm_rej_handler()
2615 ch->status = -ECONNRESET; in srp_rdma_cm_rej_handler()
2623 struct srp_rdma_ch *ch = cm_id->context; in srp_rdma_cm_handler()
2624 struct srp_target_port *target = ch->target; in srp_rdma_cm_handler()
2627 switch (event->event) { in srp_rdma_cm_handler()
2629 ch->status = 0; in srp_rdma_cm_handler()
2634 ch->status = -ENXIO; in srp_rdma_cm_handler()
2639 ch->status = 0; in srp_rdma_cm_handler()
2645 ch->status = -EHOSTUNREACH; in srp_rdma_cm_handler()
2650 shost_printk(KERN_DEBUG, target->scsi_host, in srp_rdma_cm_handler()
2653 ch->status = -ECONNRESET; in srp_rdma_cm_handler()
2658 srp_cm_rep_handler(NULL, event->param.conn.private_data, ch); in srp_rdma_cm_handler()
2662 shost_printk(KERN_DEBUG, target->scsi_host, PFX "REJ received\n"); in srp_rdma_cm_handler()
2669 if (ch->connected) { in srp_rdma_cm_handler()
2670 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2672 rdma_disconnect(ch->rdma_cm.cm_id); in srp_rdma_cm_handler()
2674 ch->status = 0; in srp_rdma_cm_handler()
2675 queue_work(system_long_wq, &target->tl_err_work); in srp_rdma_cm_handler()
2680 shost_printk(KERN_ERR, target->scsi_host, in srp_rdma_cm_handler()
2684 ch->status = 0; in srp_rdma_cm_handler()
2688 shost_printk(KERN_WARNING, target->scsi_host, in srp_rdma_cm_handler()
2689 PFX "Unhandled CM event %d\n", event->event); in srp_rdma_cm_handler()
2694 complete(&ch->done); in srp_rdma_cm_handler()
2700 * srp_change_queue_depth - setting device queue depth
2709 if (!sdev->tagged_supported) in srp_change_queue_depth()
2717 struct srp_target_port *target = ch->target; in srp_send_tsk_mgmt()
2718 struct srp_rport *rport = target->rport; in srp_send_tsk_mgmt()
2719 struct ib_device *dev = target->srp_host->srp_dev->dev; in srp_send_tsk_mgmt()
2724 if (!ch->connected || target->qp_in_error) in srp_send_tsk_mgmt()
2725 return -1; in srp_send_tsk_mgmt()
2731 mutex_lock(&rport->mutex); in srp_send_tsk_mgmt()
2732 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2734 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2737 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2739 return -1; in srp_send_tsk_mgmt()
2742 iu->num_sge = 1; in srp_send_tsk_mgmt()
2744 ib_dma_sync_single_for_cpu(dev, iu->dma, sizeof *tsk_mgmt, in srp_send_tsk_mgmt()
2746 tsk_mgmt = iu->buf; in srp_send_tsk_mgmt()
2749 tsk_mgmt->opcode = SRP_TSK_MGMT; in srp_send_tsk_mgmt()
2750 int_to_scsilun(lun, &tsk_mgmt->lun); in srp_send_tsk_mgmt()
2751 tsk_mgmt->tsk_mgmt_func = func; in srp_send_tsk_mgmt()
2752 tsk_mgmt->task_tag = req_tag; in srp_send_tsk_mgmt()
2754 spin_lock_irq(&ch->lock); in srp_send_tsk_mgmt()
2755 ch->tsk_mgmt_tag = (ch->tsk_mgmt_tag + 1) | SRP_TAG_TSK_MGMT; in srp_send_tsk_mgmt()
2756 tsk_mgmt->tag = ch->tsk_mgmt_tag; in srp_send_tsk_mgmt()
2757 spin_unlock_irq(&ch->lock); in srp_send_tsk_mgmt()
2759 init_completion(&ch->tsk_mgmt_done); in srp_send_tsk_mgmt()
2761 ib_dma_sync_single_for_device(dev, iu->dma, sizeof *tsk_mgmt, in srp_send_tsk_mgmt()
2765 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2767 return -1; in srp_send_tsk_mgmt()
2769 res = wait_for_completion_timeout(&ch->tsk_mgmt_done, in srp_send_tsk_mgmt()
2772 *status = ch->tsk_mgmt_status; in srp_send_tsk_mgmt()
2773 mutex_unlock(&rport->mutex); in srp_send_tsk_mgmt()
2777 return res > 0 ? 0 : -1; in srp_send_tsk_mgmt()
2782 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_abort()
2788 shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n"); in srp_abort()
2792 if (WARN_ON_ONCE(ch_idx >= target->ch_count)) in srp_abort()
2794 ch = &target->ch[ch_idx]; in srp_abort()
2797 shost_printk(KERN_ERR, target->scsi_host, in srp_abort()
2799 if (srp_send_tsk_mgmt(ch, tag, scmnd->device->lun, in srp_abort()
2804 if (target->rport->state == SRP_RPORT_LOST) in srp_abort()
2812 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_device()
2816 shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n"); in srp_reset_device()
2818 ch = &target->ch[0]; in srp_reset_device()
2819 if (srp_send_tsk_mgmt(ch, SRP_TAG_NO_REQ, scmnd->device->lun, in srp_reset_device()
2830 struct srp_target_port *target = host_to_target(scmnd->device->host); in srp_reset_host()
2832 shost_printk(KERN_ERR, target->scsi_host, PFX "SRP reset_host called\n"); in srp_reset_host()
2834 return srp_reconnect_rport(target->rport) == 0 ? SUCCESS : FAILED; in srp_reset_host()
2839 struct Scsi_Host *shost = dev_to_shost(starget->dev.parent); in srp_target_alloc()
2842 if (target->target_can_queue) in srp_target_alloc()
2843 starget->can_queue = target->target_can_queue; in srp_target_alloc()
2849 struct Scsi_Host *shost = sdev->host; in srp_slave_configure()
2851 struct request_queue *q = sdev->request_queue; in srp_slave_configure()
2854 if (sdev->type == TYPE_DISK) { in srp_slave_configure()
2855 timeout = max_t(unsigned, 30 * HZ, target->rq_tmo_jiffies); in srp_slave_configure()
2867 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext)); in id_ext_show()
2877 return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->ioc_guid)); in ioc_guid_show()
2887 if (target->using_rdma_cm) in service_id_show()
2888 return -ENOENT; in service_id_show()
2890 be64_to_cpu(target->ib_cm.service_id)); in service_id_show()
2900 if (target->using_rdma_cm) in pkey_show()
2901 return -ENOENT; in pkey_show()
2903 return sysfs_emit(buf, "0x%04x\n", be16_to_cpu(target->ib_cm.pkey)); in pkey_show()
2913 return sysfs_emit(buf, "%pI6\n", target->sgid.raw); in sgid_show()
2922 struct srp_rdma_ch *ch = &target->ch[0]; in dgid_show()
2924 if (target->using_rdma_cm) in dgid_show()
2925 return -ENOENT; in dgid_show()
2927 return sysfs_emit(buf, "%pI6\n", ch->ib_cm.path.dgid.raw); in dgid_show()
2937 if (target->using_rdma_cm) in orig_dgid_show()
2938 return -ENOENT; in orig_dgid_show()
2940 return sysfs_emit(buf, "%pI6\n", target->ib_cm.orig_dgid.raw); in orig_dgid_show()
2952 for (i = 0; i < target->ch_count; i++) { in req_lim_show()
2953 ch = &target->ch[i]; in req_lim_show()
2954 req_lim = min(req_lim, ch->req_lim); in req_lim_show()
2967 return sysfs_emit(buf, "%d\n", target->zero_req_lim); in zero_req_lim_show()
2977 return sysfs_emit(buf, "%u\n", target->srp_host->port); in local_ib_port_show()
2988 dev_name(&target->srp_host->srp_dev->dev->dev)); in local_ib_device_show()
2998 return sysfs_emit(buf, "%d\n", target->ch_count); in ch_count_show()
3008 return sysfs_emit(buf, "%d\n", target->comp_vector); in comp_vector_show()
3018 return sysfs_emit(buf, "%d\n", target->tl_retry_count); in tl_retry_count_show()
3028 return sysfs_emit(buf, "%u\n", target->cmd_sg_cnt); in cmd_sg_entries_show()
3038 return sysfs_emit(buf, "%s\n", target->allow_ext_sg ? "true" : "false"); in allow_ext_sg_show()
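Each of the sysfs attributes above has the same shape: recover the target port from the class device and sysfs_emit() a single field. A sketch of the pattern, assuming the usual host_to_target()/class_to_shost() plumbing shown earlier in the listing:

	static ssize_t id_ext_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
	{
		struct srp_target_port *target =
			host_to_target(class_to_shost(dev));

		return sysfs_emit(buf, "0x%016llx\n", be64_to_cpu(target->id_ext));
	}
	static DEVICE_ATTR_RO(id_ext);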
3083 .this_id = -1,
3104 * 0 and target->state == SRP_TARGET_REMOVED if asynchronous target port
3106 * 0 and target->state != SRP_TARGET_REMOVED upon success.
3113 target->state = SRP_TARGET_SCANNING; in srp_add_target()
3114 sprintf(target->target_name, "SRP.T10:%016llX", in srp_add_target()
3115 be64_to_cpu(target->id_ext)); in srp_add_target()
3117 if (scsi_add_host(target->scsi_host, host->srp_dev->dev->dev.parent)) in srp_add_target()
3118 return -ENODEV; in srp_add_target()
3120 memcpy(ids.port_id, &target->id_ext, 8); in srp_add_target()
3121 memcpy(ids.port_id + 8, &target->ioc_guid, 8); in srp_add_target()
3123 rport = srp_rport_add(target->scsi_host, &ids); in srp_add_target()
3125 scsi_remove_host(target->scsi_host); in srp_add_target()
3129 rport->lld_data = target; in srp_add_target()
3130 target->rport = rport; in srp_add_target()
3132 spin_lock(&host->target_lock); in srp_add_target()
3133 list_add_tail(&target->list, &host->target_list); in srp_add_target()
3134 spin_unlock(&host->target_lock); in srp_add_target()
3136 scsi_scan_target(&target->scsi_host->shost_gendev, in srp_add_target()
3137 0, target->scsi_id, SCAN_WILD_CARD, SCSI_SCAN_INITIAL); in srp_add_target()
3139 if (srp_connected_ch(target) < target->ch_count || in srp_add_target()
3140 target->qp_in_error) { in srp_add_target()
3141 shost_printk(KERN_INFO, target->scsi_host, in srp_add_target()
3142 PFX "SCSI scan failed - removing SCSI host\n"); in srp_add_target()
3147 pr_debug("%s: SCSI scan succeeded - detected %d LUNs\n", in srp_add_target()
3148 dev_name(&target->scsi_host->shost_gendev), in srp_add_target()
3149 srp_sdev_count(target->scsi_host)); in srp_add_target()
3151 spin_lock_irq(&target->lock); in srp_add_target()
3152 if (target->state == SRP_TARGET_SCANNING) in srp_add_target()
3153 target->state = SRP_TARGET_LIVE; in srp_add_target()
3154 spin_unlock_irq(&target->lock); in srp_add_target()
3179 * srp_conn_unique() - check whether the connection to a target is unique
3189 if (target->state == SRP_TARGET_REMOVED) in srp_conn_unique()
3194 spin_lock(&host->target_lock); in srp_conn_unique()
3195 list_for_each_entry(t, &host->target_list, list) { in srp_conn_unique()
3196 if (t != target && in srp_conn_unique()
3197 target->id_ext == t->id_ext && in srp_conn_unique()
3198 target->ioc_guid == t->ioc_guid && in srp_conn_unique()
3199 target->initiator_ext == t->initiator_ext) { in srp_conn_unique()
3204 spin_unlock(&host->target_lock); in srp_conn_unique()
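Put together, srp_conn_unique() is a straight list walk under host->target_lock that rejects a duplicate (id_ext, ioc_guid, initiator_ext) triple; a port already in SRP_TARGET_REMOVED state counts as not unique. A condensed sketch reconstructed from the fragments above (the demo_ prefix marks it as a sketch, not the verbatim function):

	static bool demo_conn_unique(struct srp_host *host,
				     struct srp_target_port *target)
	{
		struct srp_target_port *t;
		bool unique = true;

		if (target->state == SRP_TARGET_REMOVED)
			return false;	/* racing removal */

		spin_lock(&host->target_lock);
		list_for_each_entry(t, &host->target_list, list) {
			if (t != target &&
			    target->id_ext == t->id_ext &&
			    target->ioc_guid == t->ioc_guid &&
			    target->initiator_ext == t->initiator_ext) {
				unique = false;
				break;
			}
		}
		spin_unlock(&host->target_lock);
		return unique;
	}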
3281 * srp_parse_in - parse an IP address and port number combination
3288 * - IPv4: <ip_address>:<port>, e.g. 1.2.3.4:5.
3289 * - IPv6: \[<ipv6_address>\]:<port>, e.g. [1::2:3%4]:5.
3299 return -ENOMEM; in srp_parse_in()
3309 addr_end = addr + strlen(addr) - 1; in srp_parse_in()
3317 pr_debug("%s -> %pISpfsc\n", addr_port_str, sa); in srp_parse_in()
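The parsing itself leans on the kernel helper inet_pton_with_scope(), which handles both address families and the IPv6 '%<scope-id>' suffix shown in the kernel-doc above. A minimal sketch of the core split, assuming a writable NUL-terminated "addr:port" string and omitting the IPv6 bracket stripping that srp_parse_in() also performs:

	#include <linux/inet.h>
	#include <linux/socket.h>
	#include <linux/string.h>

	static int demo_parse_in(struct net *net, struct sockaddr_storage *sa,
				 char *addr_port)
	{
		char *port = strrchr(addr_port, ':'); /* last ':' starts the port */

		if (!port)
			return -EINVAL;
		*port++ = '\0';
		return inet_pton_with_scope(net, AF_UNSPEC, addr_port, port, sa);
	}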
3331 int ret = -EINVAL; in srp_parse_options()
3336 return -ENOMEM; in srp_parse_options()
3350 ret = -ENOMEM; in srp_parse_options()
3359 target->id_ext = cpu_to_be64(ull); in srp_parse_options()
3366 ret = -ENOMEM; in srp_parse_options()
3375 target->ioc_guid = cpu_to_be64(ull); in srp_parse_options()
3382 ret = -ENOMEM; in srp_parse_options()
3391 ret = hex2bin(target->ib_cm.orig_dgid.raw, p, 16); in srp_parse_options()
3403 target->ib_cm.pkey = cpu_to_be16(token); in srp_parse_options()
3409 ret = -ENOMEM; in srp_parse_options()
3418 target->ib_cm.service_id = cpu_to_be64(ull); in srp_parse_options()
3425 ret = -ENOMEM; in srp_parse_options()
3428 ret = srp_parse_in(net, &target->rdma_cm.src.ss, p, in srp_parse_options()
3435 target->rdma_cm.src_specified = true; in srp_parse_options()
3442 ret = -ENOMEM; in srp_parse_options()
3445 ret = srp_parse_in(net, &target->rdma_cm.dst.ss, p, in srp_parse_options()
3448 ret = -EINVAL; in srp_parse_options()
3454 target->using_rdma_cm = true; in srp_parse_options()
3464 target->scsi_host->max_sectors = token; in srp_parse_options()
3476 ret = -EINVAL; in srp_parse_options()
3479 target->scsi_host->can_queue = token; in srp_parse_options()
3480 target->queue_size = token + SRP_RSP_SQ_SIZE + in srp_parse_options()
3481 SRP_TSK_MGMT_SQ_SIZE; in srp_parse_options()
3483 target->scsi_host->cmd_per_lun = token; in srp_parse_options()
3496 ret = -EINVAL; in srp_parse_options()
3499 target->scsi_host->cmd_per_lun = token; in srp_parse_options()
3512 ret = -EINVAL; in srp_parse_options()
3515 target->target_can_queue = token; in srp_parse_options()
3529 ret = -EINVAL; in srp_parse_options()
3532 target->io_class = token; in srp_parse_options()
3538 ret = -ENOMEM; in srp_parse_options()
3547 target->initiator_ext = cpu_to_be64(ull); in srp_parse_options()
3561 ret = -EINVAL; in srp_parse_options()
3564 target->cmd_sg_cnt = token; in srp_parse_options()
3573 target->allow_ext_sg = !!token; in srp_parse_options()
3586 ret = -EINVAL; in srp_parse_options()
3589 target->sg_tablesize = token; in srp_parse_options()
3601 ret = -EINVAL; in srp_parse_options()
3604 target->comp_vector = token; in srp_parse_options()
3617 ret = -EINVAL; in srp_parse_options()
3620 target->tl_retry_count = token; in srp_parse_options()
3632 ret = -EINVAL; in srp_parse_options()
3635 target->max_it_iu_size = token; in srp_parse_options()
3647 ret = -EINVAL; in srp_parse_options()
3650 target->ch_count = token; in srp_parse_options()
3656 ret = -EINVAL; in srp_parse_options()
3670 if (target->scsi_host->cmd_per_lun > target->scsi_host->can_queue) { in srp_parse_options()
3671 pr_warn("cmd_per_lun = %d > queue_size = %d\n", in srp_parse_options()
3673 target->scsi_host->cmd_per_lun, in srp_parse_options()
3674 target->scsi_host->can_queue); in srp_parse_options()
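Each option above is recognized by the kernel's match_token() machinery: the login string is split on commas, each piece is matched against a token table, and the %s/%d captures are pulled out with match_strdup()/match_int(). A minimal, self-contained sketch of that machinery with a hypothetical two-entry table (the demo_ names are made up; the caller must pass a writable copy of the option string, as srp_parse_options() does via kstrdup()):

	#include <linux/kernel.h>
	#include <linux/parser.h>
	#include <linux/slab.h>
	#include <linux/string.h>

	enum { DEMO_OPT_ID_EXT, DEMO_OPT_CAN_QUEUE, DEMO_OPT_ERR };

	static const match_table_t demo_tokens = {
		{ DEMO_OPT_ID_EXT,	"id_ext=%s" },
		{ DEMO_OPT_CAN_QUEUE,	"can_queue=%d" },
		{ DEMO_OPT_ERR,		NULL }
	};

	static int demo_parse(char *options, u64 *id_ext, int *can_queue)
	{
		substring_t args[MAX_OPT_ARGS];
		char *p, *s;
		int token, ret;

		while ((p = strsep(&options, ",")) != NULL) {
			if (!*p)
				continue;
			token = match_token(p, demo_tokens, args);
			switch (token) {
			case DEMO_OPT_ID_EXT:
				s = match_strdup(args);	/* kmalloc'ed copy */
				if (!s)
					return -ENOMEM;
				ret = kstrtou64(s, 16, id_ext);
				kfree(s);
				if (ret)
					return ret;
				break;
			case DEMO_OPT_CAN_QUEUE:
				if (match_int(args, can_queue))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
		return 0;
	}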
3690 struct srp_device *srp_dev = host->srp_dev; in add_target_store()
3691 struct ib_device *ibdev = srp_dev->dev; in add_target_store()
3700 return -ENOMEM; in add_target_store()
3702 target_host->transportt = ib_srp_transport_template; in add_target_store()
3703 target_host->max_channel = 0; in add_target_store()
3704 target_host->max_id = 1; in add_target_store()
3705 target_host->max_lun = -1LL; in add_target_store()
3706 target_host->max_cmd_len = sizeof ((struct srp_cmd *) (void *) 0L)->cdb; in add_target_store()
3707 target_host->max_segment_size = ib_dma_max_seg_size(ibdev); in add_target_store()
3709 if (!(ibdev->attrs.kernel_cap_flags & IBK_SG_GAPS_REG)) in add_target_store()
3710 target_host->virt_boundary_mask = ~srp_dev->mr_page_mask; in add_target_store()
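The max_cmd_len assignment above measures the cdb[] member through a cast null pointer, the old spelling of a member-size query. On kernels that provide sizeof_field() (an assumption about kernel version, not something this file does), the equivalent is:

	#include <linux/stddef.h>

	target_host->max_cmd_len = sizeof_field(struct srp_cmd, cdb);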
3714 target->net = kobj_ns_grab_current(KOBJ_NS_TYPE_NET); in add_target_store()
3715 target->io_class = SRP_REV16A_IB_IO_CLASS; in add_target_store()
3716 target->scsi_host = target_host; in add_target_store()
3717 target->srp_host = host; in add_target_store()
3718 target->lkey = host->srp_dev->pd->local_dma_lkey; in add_target_store()
3719 target->global_rkey = host->srp_dev->global_rkey; in add_target_store()
3720 target->cmd_sg_cnt = cmd_sg_entries; in add_target_store()
3721 target->sg_tablesize = indirect_sg_entries ? : cmd_sg_entries; in add_target_store()
3722 target->allow_ext_sg = allow_ext_sg; in add_target_store()
3723 target->tl_retry_count = 7; in add_target_store()
3724 target->queue_size = SRP_DEFAULT_QUEUE_SIZE; in add_target_store()
3730 scsi_host_get(target->scsi_host); in add_target_store()
3732 ret = mutex_lock_interruptible(&host->add_target_mutex); in add_target_store()
3736 ret = srp_parse_options(target->net, buf, target); in add_target_store()
3740 if (!srp_conn_unique(target->srp_host, target)) { in add_target_store()
3741 if (target->using_rdma_cm) { in add_target_store()
3742 shost_printk(KERN_INFO, target->scsi_host, in add_target_store()
3744 be64_to_cpu(target->id_ext), in add_target_store()
3745 be64_to_cpu(target->ioc_guid), in add_target_store()
3746 &target->rdma_cm.dst); in add_target_store()
3748 shost_printk(KERN_INFO, target->scsi_host, in add_target_store()
3750 be64_to_cpu(target->id_ext), in add_target_store()
3751 be64_to_cpu(target->ioc_guid), in add_target_store()
3752 be64_to_cpu(target->initiator_ext)); in add_target_store()
3754 ret = -EEXIST; in add_target_store()
3758 if (!srp_dev->has_fr && !target->allow_ext_sg && in add_target_store()
3759 target->cmd_sg_cnt < target->sg_tablesize) { in add_target_store()
3761 target->sg_tablesize = target->cmd_sg_cnt; in add_target_store()
3764 if (srp_dev->use_fast_reg) { in add_target_store()
3765 bool gaps_reg = ibdev->attrs.kernel_cap_flags & in add_target_store()
3768 max_sectors_per_mr = srp_dev->max_pages_per_mr << in add_target_store()
3769 (ilog2(srp_dev->mr_page_size) - 9); in add_target_store()
3784 mr_per_cmd = register_always + in add_target_store()
3785 (target->scsi_host->max_sectors + 1 + in add_target_store()
3786 max_sectors_per_mr - 1) / max_sectors_per_mr; in add_target_store()
3787 } else { in add_target_store()
3788 mr_per_cmd = register_always + in add_target_store()
3789 (target->sg_tablesize + in add_target_store()
3790 srp_dev->max_pages_per_mr - 1) / in add_target_store()
3791 srp_dev->max_pages_per_mr; in add_target_store()
3794 target->scsi_host->max_sectors, srp_dev->max_pages_per_mr, srp_dev->mr_page_size, in add_target_store()
3798 target_host->sg_tablesize = target->sg_tablesize; in add_target_store()
3799 target->mr_pool_size = target->scsi_host->can_queue * mr_per_cmd; in add_target_store()
3800 target->mr_per_cmd = mr_per_cmd; in add_target_store()
3801 target->indirect_size = target->sg_tablesize * in add_target_store()
3803 max_iu_len = srp_max_it_iu_len(target->cmd_sg_cnt, in add_target_store()
3805 target->max_it_iu_size); in add_target_store()
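A worked example of the MR sizing arithmetic above, using illustrative numbers rather than values from any particular HCA: 4 KiB MR pages and 256 pages per MR give 256 << (ilog2(4096) - 9) = 2048 512-byte sectors per MR (1 MiB), and a 1024-sector max_sectors limit then needs two MRs per command when register_always is set. As standalone C:

	#include <stdio.h>

	int main(void)
	{
		unsigned int max_pages_per_mr = 256;	/* illustrative */
		unsigned int mr_page_shift = 12;	/* 4 KiB pages */
		unsigned int max_sectors = 1024;	/* scsi_host->max_sectors */
		unsigned int register_always = 1;

		/* 512-byte sectors covered by one MR */
		unsigned int max_sectors_per_mr =
			max_pages_per_mr << (mr_page_shift - 9);
		unsigned int mr_per_cmd = register_always +
			(max_sectors + 1 + max_sectors_per_mr - 1) /
			max_sectors_per_mr;

		/* prints: max_sectors_per_mr=2048 mr_per_cmd=2 */
		printf("max_sectors_per_mr=%u mr_per_cmd=%u\n",
		       max_sectors_per_mr, mr_per_cmd);
		return 0;
	}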
3807 INIT_WORK(&target->tl_err_work, srp_tl_err_work); in add_target_store()
3808 INIT_WORK(&target->remove_work, srp_remove_work); in add_target_store()
3809 spin_lock_init(&target->lock); in add_target_store()
3810 ret = rdma_query_gid(ibdev, host->port, 0, &target->sgid); in add_target_store()
3814 ret = -ENOMEM; in add_target_store()
3815 if (target->ch_count == 0) { in add_target_store()
3816 target->ch_count = in add_target_store()
3819 ibdev->num_comp_vectors), in add_target_store()
3823 target->ch = kcalloc(target->ch_count, sizeof(*target->ch), in add_target_store()
3825 if (!target->ch) in add_target_store()
3828 for (ch_idx = 0; ch_idx < target->ch_count; ++ch_idx) { in add_target_store()
3829 ch = &target->ch[ch_idx]; in add_target_store()
3830 ch->target = target; in add_target_store()
3831 ch->comp_vector = ch_idx % ibdev->num_comp_vectors; in add_target_store()
3832 spin_lock_init(&ch->lock); in add_target_store()
3833 INIT_LIST_HEAD(&ch->free_tx); in add_target_store()
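The modulo assignment above spreads the channels round-robin across the device's completion vectors, so each channel's CQ interrupts can land on a different vector (and, given sensible IRQ affinity, a different CPU). The vector chosen here is the one later handed to ib_alloc_cq(); a hedged fragment of that call, with cq_size standing in for the driver's computed queue depth:

	struct ib_cq *cq = ib_alloc_cq(ibdev, ch, cq_size,
				       ch->comp_vector, IB_POLL_SOFTIRQ);

	if (IS_ERR(cq))
		return PTR_ERR(cq);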
3846 if (target->using_rdma_cm) in add_target_store()
3847 snprintf(dst, sizeof(dst), "%pIS", in add_target_store()
3848 &target->rdma_cm.dst); in add_target_store()
3849 else in add_target_store()
3850 snprintf(dst, sizeof(dst), "%pI6", in add_target_store()
3851 target->ib_cm.orig_dgid.raw); in add_target_store()
3852 shost_printk(KERN_ERR, target->scsi_host, in add_target_store()
3855 target->ch_count, dst); in add_target_store()
3860 target->ch_count = ch - target->ch; in add_target_store()
3868 target->scsi_host->nr_hw_queues = target->ch_count; in add_target_store()
3874 if (target->state != SRP_TARGET_REMOVED) { in add_target_store()
3875 if (target->using_rdma_cm) { in add_target_store()
3876 shost_printk(KERN_DEBUG, target->scsi_host, PFX in add_target_store()
3878 be64_to_cpu(target->id_ext), in add_target_store()
3879 be64_to_cpu(target->ioc_guid), in add_target_store()
3880 target->sgid.raw, &target->rdma_cm.dst); in add_target_store()
3882 shost_printk(KERN_DEBUG, target->scsi_host, PFX in add_target_store()
3884 be64_to_cpu(target->id_ext), in add_target_store()
3885 be64_to_cpu(target->ioc_guid), in add_target_store()
3886 be16_to_cpu(target->ib_cm.pkey), in add_target_store()
3887 be64_to_cpu(target->ib_cm.service_id), in add_target_store()
3888 target->sgid.raw, in add_target_store()
3889 target->ib_cm.orig_dgid.raw); in add_target_store()
3896 mutex_unlock(&host->add_target_mutex); in add_target_store()
3899 scsi_host_put(target->scsi_host); in add_target_store()
3906 if (target->state != SRP_TARGET_REMOVED) in add_target_store()
3907 kobj_ns_drop(KOBJ_NS_TYPE_NET, target->net); in add_target_store()
3908 scsi_host_put(target->scsi_host); in add_target_store()
3917 for (i = 0; i < target->ch_count; i++) { in add_target_store()
3918 ch = &target->ch[i]; in add_target_store()
3922 kfree(target->ch); in add_target_store()
3933 return sysfs_emit(buf, "%s\n", dev_name(&host->srp_dev->dev->dev)); in ibdev_show()
3943 return sysfs_emit(buf, "%u\n", host->port); in port_show()
3963 INIT_LIST_HEAD(&host->target_list); in srp_add_port()
3964 spin_lock_init(&host->target_lock); in srp_add_port()
3965 mutex_init(&host->add_target_mutex); in srp_add_port()
3966 host->srp_dev = device; in srp_add_port()
3967 host->port = port; in srp_add_port()
3969 device_initialize(&host->dev); in srp_add_port()
3970 host->dev.class = &srp_class; in srp_add_port()
3971 host->dev.parent = device->dev->dev.parent; in srp_add_port()
3972 if (dev_set_name(&host->dev, "srp-%s-%u", dev_name(&device->dev->dev), in srp_add_port()
3975 if (device_add(&host->dev)) in srp_add_port()
3981 device_del(&host->dev); in srp_add_port()
3982 put_device(&host->dev); in srp_add_port()
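The unwind above (device_del() followed by put_device()) follows the driver-core lifetime rule: once device_initialize() has run, the embedded struct device is refcounted and the containing object must never be freed with a bare kfree() at the error site. The final put lands in a release callback along these lines (a sketch of the pattern, not this driver's exact callback):

	#include <linux/device.h>
	#include <linux/slab.h>

	static void demo_release_dev(struct device *dev)
	{
		struct srp_host *host = container_of(dev, struct srp_host, dev);

		kfree(host);	/* runs only once the last reference drops */
	}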
3991 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { in srp_rename_dev()
3994 snprintf(name, sizeof(name), "srp-%s-%u", in srp_rename_dev()
3995 dev_name(&device->dev), host->port); in srp_rename_dev()
3996 device_rename(&host->dev, name); in srp_rename_dev()
4003 struct ib_device_attr *attr = &device->attrs; in srp_add_one()
4012 return -ENOMEM; in srp_add_one()
4019 mr_page_shift = max(12, ffs(attr->page_size_cap) - 1); in srp_add_one()
4020 srp_dev->mr_page_size = 1 << mr_page_shift; in srp_add_one()
4021 srp_dev->mr_page_mask = ~((u64) srp_dev->mr_page_size - 1); in srp_add_one()
4022 max_pages_per_mr = attr->max_mr_size; in srp_add_one()
4023 do_div(max_pages_per_mr, srp_dev->mr_page_size); in srp_add_one()
4025 attr->max_mr_size, srp_dev->mr_page_size, in srp_add_one()
4027 srp_dev->max_pages_per_mr = min_t(u64, SRP_MAX_PAGES_PER_MR, in srp_add_one()
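do_div() explains the in-place update above: it is the kernel's 64-by-32 division helper, which divides its u64 first argument in place and returns the remainder, so max_pages_per_mr has to start life as a copy of attr->max_mr_size. With illustrative numbers (a 1 GiB max_mr_size and 4 KiB MR pages, not values from any particular HCA):

	#include <asm/div64.h>

	u64 max_pages_per_mr = 1ULL << 30;		/* attr->max_mr_size */
	u32 rem = do_div(max_pages_per_mr, 4096);	/* mr_page_size */
	/* max_pages_per_mr == 262144, rem == 0; the min_t() above then
	 * clamps the count to SRP_MAX_PAGES_PER_MR. */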
4030 srp_dev->has_fr = (attr->device_cap_flags & in srp_add_one()
4031 IB_DEVICE_MEM_MGT_EXTENSIONS); in srp_add_one()
4032 if (!never_register && !srp_dev->has_fr) in srp_add_one()
4033 dev_warn(&device->dev, "FR is not supported\n"); in srp_add_one()
4034 else if (!never_register && in srp_add_one()
4035 attr->max_mr_size >= 2 * srp_dev->mr_page_size) in srp_add_one()
4036 srp_dev->use_fast_reg = srp_dev->has_fr; in srp_add_one()
4038 if (never_register || !register_always || !srp_dev->has_fr) in srp_add_one()
4039 flags |= IB_PD_UNSAFE_GLOBAL_RKEY; in srp_add_one()
4041 if (srp_dev->use_fast_reg) { in srp_add_one()
4042 srp_dev->max_pages_per_mr = in srp_add_one()
4043 min_t(u32, srp_dev->max_pages_per_mr, in srp_add_one()
4044 attr->max_fast_reg_page_list_len); in srp_add_one()
4046 srp_dev->mr_max_size = srp_dev->mr_page_size * in srp_add_one()
4047 srp_dev->max_pages_per_mr; in srp_add_one()
4048 …pr_debug("%s: mr_page_shift = %d, device->max_mr_size = %#llx, device->max_fast_reg_page_list_len … in srp_add_one()
4049 dev_name(&device->dev), mr_page_shift, attr->max_mr_size, in srp_add_one()
4050 attr->max_fast_reg_page_list_len, in srp_add_one()
4051 srp_dev->max_pages_per_mr, srp_dev->mr_max_size); in srp_add_one()
4053 INIT_LIST_HEAD(&srp_dev->dev_list); in srp_add_one()
4055 srp_dev->dev = device; in srp_add_one()
4056 srp_dev->pd = ib_alloc_pd(device, flags); in srp_add_one()
4057 if (IS_ERR(srp_dev->pd)) { in srp_add_one()
4058 int ret = PTR_ERR(srp_dev->pd); in srp_add_one()
4065 srp_dev->global_rkey = srp_dev->pd->unsafe_global_rkey; in srp_add_one()
4066 WARN_ON_ONCE(srp_dev->global_rkey == 0); in srp_add_one()
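IB_PD_UNSAFE_GLOBAL_RKEY is what makes the rkey read-out above legal: only a PD allocated with that flag carries a valid pd->unsafe_global_rkey, an rkey granting remote access to all of host memory (hence "unsafe", and hence the WARN_ON_ONCE() sanity check). A hedged sketch of the allocation side:

	struct ib_pd *pd = ib_alloc_pd(device, IB_PD_UNSAFE_GLOBAL_RKEY);

	if (IS_ERR(pd))
		return PTR_ERR(pd);
	/* Valid only because of the flag; zero otherwise. */
	pr_info("global rkey = 0x%08x\n", pd->unsafe_global_rkey);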
4072 list_add_tail(&host->list, &srp_dev->dev_list); in srp_add_one()
4087 list_for_each_entry_safe(host, tmp_host, &srp_dev->dev_list, list) { in srp_remove_one()
4092 device_del(&host->dev); in srp_remove_one()
4097 spin_lock(&host->target_lock); in srp_remove_one()
4098 list_for_each_entry(target, &host->target_list, list) in srp_remove_one()
4100 spin_unlock(&host->target_lock); in srp_remove_one()
4105 * target->tl_err_work so waiting for the remove works to in srp_remove_one()
4110 put_device(&host->dev); in srp_remove_one()
4113 ib_dealloc_pd(srp_dev->pd); in srp_remove_one()
4171 ret = -ENOMEM; in srp_init_module()
4175 ret = -ENOMEM; in srp_init_module()