Lines matching references to srv_path in drivers/infiniband/ulp/rtrs/rtrs-srv.c (Linux kernel RTRS RDMA transport, server side). Each entry gives the source line number, the matched line, and the enclosing function; "argument" and "local" mark the entries where srv_path is declared as a function parameter or as a local variable.

66 static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,  in rtrs_srv_change_state()  argument
73 spin_lock_irqsave(&srv_path->state_lock, flags); in rtrs_srv_change_state()
74 old_state = srv_path->state; in rtrs_srv_change_state()
93 srv_path->state = new_state; in rtrs_srv_change_state()
94 spin_unlock_irqrestore(&srv_path->state_lock, flags); in rtrs_srv_change_state()
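The fragments at 66-94 outline a spinlock-guarded state machine: the transition is validated and applied atomically, and the caller learns whether it won. A plausible reconstruction follows; the transition table between lines 74 and 93 is elided in the listing, so the allowed transitions are inferred from the states used elsewhere in this file (CONNECTING, CONNECTED, CLOSING, CLOSED).

static bool rtrs_srv_change_state(struct rtrs_srv_path *srv_path,
                                  enum rtrs_srv_state new_state)
{
        enum rtrs_srv_state old_state;
        bool changed = false;
        unsigned long flags;

        /* IRQ-safe: the state is also read and changed from completion context. */
        spin_lock_irqsave(&srv_path->state_lock, flags);
        old_state = srv_path->state;
        switch (new_state) {                    /* transition table: assumption */
        case RTRS_SRV_CONNECTED:
                if (old_state == RTRS_SRV_CONNECTING)
                        changed = true;
                break;
        case RTRS_SRV_CLOSING:
                if (old_state == RTRS_SRV_CONNECTING ||
                    old_state == RTRS_SRV_CONNECTED)
                        changed = true;
                break;
        case RTRS_SRV_CLOSED:
                if (old_state == RTRS_SRV_CLOSING)
                        changed = true;
                break;
        default:
                break;
        }
        if (changed)
                srv_path->state = new_state;
        spin_unlock_irqrestore(&srv_path->state_lock, flags);

        return changed;
}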
106 static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_free_ops_ids() argument
108 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_free_ops_ids()
111 if (srv_path->ops_ids) { in rtrs_srv_free_ops_ids()
113 free_id(srv_path->ops_ids[i]); in rtrs_srv_free_ops_ids()
114 kfree(srv_path->ops_ids); in rtrs_srv_free_ops_ids()
115 srv_path->ops_ids = NULL; in rtrs_srv_free_ops_ids()
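Joining the fragments at 106-115 gives the whole teardown helper; only the loop header (line 112) is missing, and iterating up to srv->queue_depth is an inference from the matching allocator below.

static void rtrs_srv_free_ops_ids(struct rtrs_srv_path *srv_path)
{
        struct rtrs_srv_sess *srv = srv_path->srv;
        int i;

        if (srv_path->ops_ids) {
                for (i = 0; i < srv->queue_depth; i++)
                        free_id(srv_path->ops_ids[i]);
                kfree(srv_path->ops_ids);
                srv_path->ops_ids = NULL;       /* makes a second call a no-op */
        }
}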
127 struct rtrs_srv_path *srv_path = container_of(ref, in rtrs_srv_inflight_ref_release() local
131 percpu_ref_exit(&srv_path->ids_inflight_ref); in rtrs_srv_inflight_ref_release()
132 complete(&srv_path->complete_done); in rtrs_srv_inflight_ref_release()
135 static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_alloc_ops_ids() argument
137 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_alloc_ops_ids()
141 srv_path->ops_ids = kcalloc(srv->queue_depth, in rtrs_srv_alloc_ops_ids()
142 sizeof(*srv_path->ops_ids), in rtrs_srv_alloc_ops_ids()
144 if (!srv_path->ops_ids) in rtrs_srv_alloc_ops_ids()
152 srv_path->ops_ids[i] = id; in rtrs_srv_alloc_ops_ids()
155 ret = percpu_ref_init(&srv_path->ids_inflight_ref, in rtrs_srv_alloc_ops_ids()
161 init_completion(&srv_path->complete_done); in rtrs_srv_alloc_ops_ids()
166 rtrs_srv_free_ops_ids(srv_path); in rtrs_srv_alloc_ops_ids()
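The matching allocator, reconstructed from lines 135-166. The per-id allocation between lines 144 and 152 is not visible, so the kzalloc of each rtrs_srv_op and the single -ENOMEM error path are assumptions; the percpu_ref and completion setup is taken directly from the fragments.

static int rtrs_srv_alloc_ops_ids(struct rtrs_srv_path *srv_path)
{
        struct rtrs_srv_sess *srv = srv_path->srv;
        struct rtrs_srv_op *id;
        int i, ret;

        srv_path->ops_ids = kcalloc(srv->queue_depth,
                                    sizeof(*srv_path->ops_ids),
                                    GFP_KERNEL);
        if (!srv_path->ops_ids)
                goto err;

        for (i = 0; i < srv->queue_depth; ++i) {
                id = kzalloc(sizeof(*id), GFP_KERNEL);  /* assumption */
                if (!id)
                        goto err;
                srv_path->ops_ids[i] = id;
        }

        ret = percpu_ref_init(&srv_path->ids_inflight_ref,
                              rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
        if (ret)
                goto err;
        init_completion(&srv_path->complete_done);

        return 0;

err:
        /* free_ops_ids() tolerates a partially filled array */
        rtrs_srv_free_ops_ids(srv_path);
        return -ENOMEM;
}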
170 static inline void rtrs_srv_get_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_get_ops_ids() argument
172 percpu_ref_get(&srv_path->ids_inflight_ref); in rtrs_srv_get_ops_ids()
175 static inline void rtrs_srv_put_ops_ids(struct rtrs_srv_path *srv_path) in rtrs_srv_put_ops_ids() argument
177 percpu_ref_put(&srv_path->ids_inflight_ref); in rtrs_srv_put_ops_ids()
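Taken together, lines 127-132, 155-161, 170-177 and 1566-1569 (in rtrs_srv_close_work below) form the standard percpu_ref quiescence idiom: every I/O operation holds a reference, shutdown kills the ref, and the release callback signals a completion that the closer waits on. A condensed sketch; the path_inflight_* helpers are hypothetical groupings, while the rtrs_* names come from the listing.

/* Fires once the last reference is dropped after percpu_ref_kill(). */
static void rtrs_srv_inflight_ref_release(struct percpu_ref *ref)
{
        struct rtrs_srv_path *srv_path =
                container_of(ref, struct rtrs_srv_path, ids_inflight_ref);

        percpu_ref_exit(&srv_path->ids_inflight_ref);
        complete(&srv_path->complete_done);
}

/* Hypothetical condensation of the setup in rtrs_srv_alloc_ops_ids(). */
static int path_inflight_init(struct rtrs_srv_path *srv_path)
{
        init_completion(&srv_path->complete_done);
        return percpu_ref_init(&srv_path->ids_inflight_ref,
                               rtrs_srv_inflight_ref_release, 0, GFP_KERNEL);
}

/* Hypothetical condensation of the teardown in rtrs_srv_close_work(). */
static void path_inflight_drain(struct rtrs_srv_path *srv_path)
{
        percpu_ref_kill(&srv_path->ids_inflight_ref);   /* drop the initial ref */
        wait_for_completion(&srv_path->complete_done);  /* until all puts land */
}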
184 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_reg_mr_done() local
189 close_path(srv_path); in rtrs_srv_reg_mr_done()
201 struct rtrs_srv_path *srv_path = to_srv_path(s); in rdma_write_sg() local
202 dma_addr_t dma_addr = srv_path->dma_addr[id->msg_id]; in rdma_write_sg()
236 plist->lkey = srv_path->s.dev->ib_pd->local_dma_lkey; in rdma_write_sg()
287 srv_mr = &srv_path->mrs[id->msg_id]; in rdma_write_sg()
303 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; in rdma_write_sg()
307 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, in rdma_write_sg()
320 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, dma_addr, in rdma_write_sg()
345 struct rtrs_srv_path *srv_path = to_srv_path(s); in send_io_resp_imm() local
407 srv_mr = &srv_path->mrs[id->msg_id]; in send_io_resp_imm()
424 list.lkey = srv_path->s.dev->ib_pd->local_dma_lkey; in send_io_resp_imm()
428 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, in send_io_resp_imm()
449 void close_path(struct rtrs_srv_path *srv_path) in close_path() argument
451 if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING)) in close_path()
452 queue_work(rtrs_wq, &srv_path->close_work); in close_path()
453 WARN_ON(srv_path->state != RTRS_SRV_CLOSING); in close_path()
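Lines 449-453 are nearly the whole function; with braces restored it reads as below. Since rtrs_srv_change_state() succeeds for exactly one caller, concurrent close attempts queue close_work at most once.

void close_path(struct rtrs_srv_path *srv_path)
{
        if (rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSING))
                queue_work(rtrs_wq, &srv_path->close_work);
        /* Fires if the path had already moved past CLOSING to CLOSED. */
        WARN_ON(srv_path->state != RTRS_SRV_CLOSING);
}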
485 struct rtrs_srv_path *srv_path; in rtrs_srv_resp_rdma() local
495 srv_path = to_srv_path(s); in rtrs_srv_resp_rdma()
499 if (srv_path->state != RTRS_SRV_CONNECTED) { in rtrs_srv_resp_rdma()
502 kobject_name(&srv_path->kobj), in rtrs_srv_resp_rdma()
503 rtrs_srv_state_str(srv_path->state)); in rtrs_srv_resp_rdma()
507 struct rtrs_srv_mr *mr = &srv_path->mrs[id->msg_id]; in rtrs_srv_resp_rdma()
513 kobject_name(&srv_path->kobj), in rtrs_srv_resp_rdma()
529 kobject_name(&srv_path->kobj)); in rtrs_srv_resp_rdma()
530 close_path(srv_path); in rtrs_srv_resp_rdma()
533 rtrs_srv_put_ops_ids(srv_path); in rtrs_srv_resp_rdma()
549 static void unmap_cont_bufs(struct rtrs_srv_path *srv_path) in unmap_cont_bufs() argument
553 for (i = 0; i < srv_path->mrs_num; i++) { in unmap_cont_bufs()
556 srv_mr = &srv_path->mrs[i]; in unmap_cont_bufs()
559 rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1); in unmap_cont_bufs()
562 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl, in unmap_cont_bufs()
566 kfree(srv_path->mrs); in unmap_cont_bufs()
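Reconstruction of unmap_cont_bufs() from lines 549-566. The gaps hide several calls, so the guard on the IU, the ib_dereg_mr(), and the sg_free_table() are assumptions about what fast-registration MR teardown needs; the ib_dma_unmap_sg() arguments past sgt.sgl are likewise filled in.

static void unmap_cont_bufs(struct rtrs_srv_path *srv_path)
{
        int i;

        for (i = 0; i < srv_path->mrs_num; i++) {
                struct rtrs_srv_mr *srv_mr;

                srv_mr = &srv_path->mrs[i];
                if (srv_mr->iu)         /* IU only allocated in some modes */
                        rtrs_iu_free(srv_mr->iu, srv_path->s.dev->ib_dev, 1);
                ib_dereg_mr(srv_mr->mr);                        /* assumption */
                ib_dma_unmap_sg(srv_path->s.dev->ib_dev, srv_mr->sgt.sgl,
                                srv_mr->sgt.nents, DMA_BIDIRECTIONAL);
                sg_free_table(&srv_mr->sgt);                    /* assumption */
        }
        kfree(srv_path->mrs);
}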
569 static int map_cont_bufs(struct rtrs_srv_path *srv_path) in map_cont_bufs() argument
571 struct rtrs_srv_sess *srv = srv_path->srv; in map_cont_bufs()
572 struct rtrs_path *ss = &srv_path->s; in map_cont_bufs()
591 srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len; in map_cont_bufs()
596 srv_path->mrs = kcalloc(mrs_num, sizeof(*srv_path->mrs), GFP_KERNEL); in map_cont_bufs()
597 if (!srv_path->mrs) in map_cont_bufs()
600 for (srv_path->mrs_num = 0; srv_path->mrs_num < mrs_num; in map_cont_bufs()
601 srv_path->mrs_num++) { in map_cont_bufs()
602 struct rtrs_srv_mr *srv_mr = &srv_path->mrs[srv_path->mrs_num]; in map_cont_bufs()
607 chunks = chunks_per_mr * srv_path->mrs_num; in map_cont_bufs()
620 nr_sgt = ib_dma_map_sg(srv_path->s.dev->ib_dev, sgt->sgl, in map_cont_bufs()
626 mr = ib_alloc_mr(srv_path->s.dev->ib_pd, IB_MR_TYPE_MEM_REG, in map_cont_bufs()
642 GFP_KERNEL, srv_path->s.dev->ib_dev, in map_cont_bufs()
652 srv_path->dma_addr[chunks + i] = sg_dma_address(s); in map_cont_bufs()
659 srv_path->mem_bits = (MAX_IMM_PAYL_BITS - chunk_bits); in map_cont_bufs()
666 ib_dma_unmap_sg(srv_path->s.dev->ib_dev, sgt->sgl, in map_cont_bufs()
671 unmap_cont_bufs(srv_path); in map_cont_bufs()
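The sizing arithmetic implied by lines 591-659: the queue_depth chunks are spread across as few fast-registration MRs as the device allows, and mem_bits reserves the low bits of the 32-bit immediate payload for the byte offset inside one chunk. A sketch of just that arithmetic, wrapped in a hypothetical helper; MAX_IMM_PAYL_BITS is an rtrs constant assumed from line 659, and the rebalancing step is an assumption.

/* Hypothetical helper condensing the sizing done in map_cont_bufs(). */
static void rtrs_srv_size_mrs(struct rtrs_srv_path *srv_path,
                              int queue_depth, int *mrs_num,
                              int *chunks_per_mr)
{
        /* Device limit on entries one fast-reg MR may cover (line 591). */
        int per_mr = srv_path->s.dev->ib_dev->attrs.max_fast_reg_page_list_len;
        int chunk_bits;

        *mrs_num = DIV_ROUND_UP(queue_depth, per_mr);
        /* Rebalance so the MRs come out evenly filled (assumption). */
        *chunks_per_mr = DIV_ROUND_UP(queue_depth, *mrs_num);

        /*
         * Immediate payload split: the top chunk_bits bits index the
         * chunk (msg_id), the low mem_bits bits carry the byte offset;
         * see the decode in rtrs_srv_rdma_done() further down.
         */
        chunk_bits = ilog2(queue_depth - 1) + 1;
        srv_path->mem_bits = MAX_IMM_PAYL_BITS - chunk_bits;
}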
679 struct rtrs_srv_path *srv_path = to_srv_path(con->c.path); in rtrs_srv_hb_err_handler() local
681 rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&srv_path->kobj)); in rtrs_srv_hb_err_handler()
685 static void rtrs_srv_init_hb(struct rtrs_srv_path *srv_path) in rtrs_srv_init_hb() argument
687 rtrs_init_hb(&srv_path->s, &io_comp_cqe, in rtrs_srv_init_hb()
694 static void rtrs_srv_start_hb(struct rtrs_srv_path *srv_path) in rtrs_srv_start_hb() argument
696 rtrs_start_hb(&srv_path->s); in rtrs_srv_start_hb()
699 static void rtrs_srv_stop_hb(struct rtrs_srv_path *srv_path) in rtrs_srv_stop_hb() argument
701 rtrs_stop_hb(&srv_path->s); in rtrs_srv_stop_hb()
708 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_info_rsp_done() local
712 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); in rtrs_srv_info_rsp_done()
717 close_path(srv_path); in rtrs_srv_info_rsp_done()
723 static int rtrs_srv_path_up(struct rtrs_srv_path *srv_path) in rtrs_srv_path_up() argument
725 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_up()
737 srv_path->established = true; in rtrs_srv_path_up()
742 static void rtrs_srv_path_down(struct rtrs_srv_path *srv_path) in rtrs_srv_path_down() argument
744 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_path_down()
747 if (!srv_path->established) in rtrs_srv_path_down()
750 srv_path->established = false; in rtrs_srv_path_down()
762 struct rtrs_srv_path *srv_path; in exist_pathname() local
775 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in exist_pathname()
776 if (strlen(srv_path->s.sessname) == strlen(pathname) && in exist_pathname()
777 !strcmp(srv_path->s.sessname, pathname)) { in exist_pathname()
790 static int post_recv_path(struct rtrs_srv_path *srv_path);
797 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_info_req() local
805 err = post_recv_path(srv_path); in process_info_req()
816 if (exist_pathname(srv_path->srv->ctx, in process_info_req()
817 msg->pathname, &srv_path->srv->paths_uuid)) { in process_info_req()
821 strscpy(srv_path->s.sessname, msg->pathname, in process_info_req()
822 sizeof(srv_path->s.sessname)); in process_info_req()
824 rwr = kcalloc(srv_path->mrs_num, sizeof(*rwr), GFP_KERNEL); in process_info_req()
829 tx_sz += sizeof(rsp->desc[0]) * srv_path->mrs_num; in process_info_req()
830 tx_iu = rtrs_iu_alloc(1, tx_sz, GFP_KERNEL, srv_path->s.dev->ib_dev, in process_info_req()
839 rsp->sg_cnt = cpu_to_le16(srv_path->mrs_num); in process_info_req()
841 for (mri = 0; mri < srv_path->mrs_num; mri++) { in process_info_req()
842 struct ib_mr *mr = srv_path->mrs[mri].mr; in process_info_req()
863 err = rtrs_srv_create_path_files(srv_path); in process_info_req()
866 kobject_get(&srv_path->kobj); in process_info_req()
867 get_device(&srv_path->srv->dev); in process_info_req()
868 err = rtrs_srv_change_state(srv_path, RTRS_SRV_CONNECTED); in process_info_req()
874 rtrs_srv_start_hb(srv_path); in process_info_req()
882 err = rtrs_srv_path_up(srv_path); in process_info_req()
888 ib_dma_sync_single_for_device(srv_path->s.dev->ib_dev, in process_info_req()
897 rtrs_iu_free(tx_iu, srv_path->s.dev->ib_dev, 1); in process_info_req()
909 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_info_req_done() local
929 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, iu->dma_addr, in rtrs_srv_info_req_done()
941 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); in rtrs_srv_info_req_done()
944 rtrs_iu_free(iu, srv_path->s.dev->ib_dev, 1); in rtrs_srv_info_req_done()
945 close_path(srv_path); in rtrs_srv_info_req_done()
951 struct rtrs_srv_path *srv_path = to_srv_path(s); in post_recv_info_req() local
956 GFP_KERNEL, srv_path->s.dev->ib_dev, in post_recv_info_req()
964 rtrs_iu_free(rx_iu, srv_path->s.dev->ib_dev, 1); in post_recv_info_req()
984 static int post_recv_path(struct rtrs_srv_path *srv_path) in post_recv_path() argument
986 struct rtrs_srv_sess *srv = srv_path->srv; in post_recv_path()
987 struct rtrs_path *s = &srv_path->s; in post_recv_path()
991 for (cid = 0; cid < srv_path->s.con_num; cid++) { in post_recv_path()
996 if (srv_path->state != RTRS_SRV_CONNECTING) { in post_recv_path()
998 rtrs_srv_state_str(srv_path->state)); in post_recv_path()
1002 if (!srv_path->s.con[cid]) { in post_recv_path()
1007 err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size); in post_recv_path()
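Reconstruction of the receive-buffer posting loop (lines 984-1007). The q_size computation between lines 991 and 996 is hidden; the usual rtrs split of a small service connection at cid 0 versus queue_depth-sized I/O connections, and the SERVICE_CON_QUEUE_DEPTH constant, are assumptions. The s local from line 987 feeds error prints whose text the listing does not show, so those are elided here.

static int post_recv_path(struct rtrs_srv_path *srv_path)
{
        struct rtrs_srv_sess *srv = srv_path->srv;
        size_t q_size;
        int err, cid;

        for (cid = 0; cid < srv_path->s.con_num; cid++) {
                q_size = cid == 0 ? SERVICE_CON_QUEUE_DEPTH  /* assumption */
                                  : srv->queue_depth;
                if (srv_path->state != RTRS_SRV_CONNECTING) {
                        /* the listing shows an rtrs_err() here printing
                         * rtrs_srv_state_str(srv_path->state) */
                        return -EIO;
                }
                if (!srv_path->s.con[cid])
                        return -EIO;    /* connection vanished under us */
                err = post_recv_io(to_srv_con(srv_path->s.con[cid]), q_size);
                if (err)
                        return err;
        }
        return 0;
}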
1022 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_read() local
1023 struct rtrs_srv_sess *srv = srv_path->srv; in process_read()
1031 if (srv_path->state != RTRS_SRV_CONNECTED) { in process_read()
1034 rtrs_srv_state_str(srv_path->state)); in process_read()
1042 rtrs_srv_get_ops_ids(srv_path); in process_read()
1043 rtrs_srv_update_rdma_stats(srv_path->stats, off, READ); in process_read()
1044 id = srv_path->ops_ids[buf_id]; in process_read()
1070 close_path(srv_path); in process_read()
1072 rtrs_srv_put_ops_ids(srv_path); in process_read()
1080 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_write() local
1081 struct rtrs_srv_sess *srv = srv_path->srv; in process_write()
1089 if (srv_path->state != RTRS_SRV_CONNECTED) { in process_write()
1092 rtrs_srv_state_str(srv_path->state)); in process_write()
1095 rtrs_srv_get_ops_ids(srv_path); in process_write()
1096 rtrs_srv_update_rdma_stats(srv_path->stats, off, WRITE); in process_write()
1097 id = srv_path->ops_ids[buf_id]; in process_write()
1122 close_path(srv_path); in process_write()
1124 rtrs_srv_put_ops_ids(srv_path); in process_write()
1131 struct rtrs_srv_path *srv_path = to_srv_path(s); in process_io_req() local
1135 ib_dma_sync_single_for_cpu(srv_path->s.dev->ib_dev, in process_io_req()
1136 srv_path->dma_addr[id], in process_io_req()
1158 close_path(srv_path); in process_io_req()
1167 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_inv_rkey_done() local
1168 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_inv_rkey_done()
1175 close_path(srv_path); in rtrs_srv_inv_rkey_done()
1224 struct rtrs_srv_path *srv_path = to_srv_path(s); in rtrs_srv_rdma_done() local
1225 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_srv_rdma_done()
1235 close_path(srv_path); in rtrs_srv_rdma_done()
1248 srv_path->s.hb_missed_cnt = 0; in rtrs_srv_rdma_done()
1252 close_path(srv_path); in rtrs_srv_rdma_done()
1261 msg_id = imm_payload >> srv_path->mem_bits; in rtrs_srv_rdma_done()
1262 off = imm_payload & ((1 << srv_path->mem_bits) - 1); in rtrs_srv_rdma_done()
1266 close_path(srv_path); in rtrs_srv_rdma_done()
1270 struct rtrs_srv_mr *mr = &srv_path->mrs[msg_id]; in rtrs_srv_rdma_done()
1278 close_path(srv_path); in rtrs_srv_rdma_done()
1287 rtrs_send_hb_ack(&srv_path->s); in rtrs_srv_rdma_done()
1290 srv_path->s.hb_missed_cnt = 0; in rtrs_srv_rdma_done()
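Lines 1261-1262 are the decode half of the immediate-data packing set up by mem_bits in map_cont_bufs(). Both directions are shown for clarity: imm_decode() mirrors the listing, while imm_encode() is a hypothetical inverse for illustration (the real encoding happens on the client side, which is not in this file).

static inline u32 imm_encode(u32 msg_id, u32 off, u32 mem_bits)
{
        return (msg_id << mem_bits) | (off & ((1U << mem_bits) - 1));
}

static inline void imm_decode(u32 imm_payload, u32 mem_bits,
                              u32 *msg_id, u32 *off)
{
        *msg_id = imm_payload >> mem_bits;              /* high bits: chunk id */
        *off = imm_payload & ((1U << mem_bits) - 1);    /* low bits: offset */
}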
1322 struct rtrs_srv_path *srv_path; in rtrs_srv_get_path_name() local
1326 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in rtrs_srv_get_path_name()
1327 if (srv_path->state != RTRS_SRV_CONNECTED) in rtrs_srv_get_path_name()
1329 strscpy(pathname, srv_path->s.sessname, in rtrs_srv_get_path_name()
1330 min_t(size_t, sizeof(srv_path->s.sessname), len)); in rtrs_srv_get_path_name()
1350 static int find_next_bit_ring(struct rtrs_srv_path *srv_path) in find_next_bit_ring() argument
1352 struct ib_device *ib_dev = srv_path->s.dev->ib_dev; in find_next_bit_ring()
1355 v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask); in find_next_bit_ring()
1361 static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path) in rtrs_srv_get_next_cq_vector() argument
1363 srv_path->cur_cq_vector = find_next_bit_ring(srv_path); in rtrs_srv_get_next_cq_vector()
1365 return srv_path->cur_cq_vector; in rtrs_srv_get_next_cq_vector()
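Reconstruction of the round-robin CQ vector choice (lines 1350-1365). The wrap condition after line 1355 is an assumption: cpumask_next() returns >= nr_cpu_ids at the end of the mask, and a vector at or beyond the device's completion-vector count must also wrap back to the first usable CPU.

static int find_next_bit_ring(struct rtrs_srv_path *srv_path)
{
        struct ib_device *ib_dev = srv_path->s.dev->ib_dev;
        unsigned int v;

        v = cpumask_next(srv_path->cur_cq_vector, &cq_affinity_mask);
        if (v >= nr_cpu_ids || v >= ib_dev->num_comp_vectors)
                v = cpumask_first(&cq_affinity_mask);   /* wrap: assumption */
        return v;
}

static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_path *srv_path)
{
        /* cur_cq_vector starts at -1 (see __alloc_path below), so the
         * first call picks the first bit of cq_affinity_mask. */
        srv_path->cur_cq_vector = find_next_bit_ring(srv_path);

        return srv_path->cur_cq_vector;
}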
1472 struct rtrs_srv_path *srv_path) in __add_path_to_srv() argument
1474 list_add_tail(&srv_path->s.entry, &srv->paths_list); in __add_path_to_srv()
1479 static void del_path_from_srv(struct rtrs_srv_path *srv_path) in del_path_from_srv() argument
1481 struct rtrs_srv_sess *srv = srv_path->srv; in del_path_from_srv()
1487 list_del(&srv_path->s.entry); in del_path_from_srv()
1520 struct rtrs_srv_path *srv_path; in __is_path_w_addr_exists() local
1522 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in __is_path_w_addr_exists()
1523 if (!sockaddr_cmp((struct sockaddr *)&srv_path->s.dst_addr, in __is_path_w_addr_exists()
1525 !sockaddr_cmp((struct sockaddr *)&srv_path->s.src_addr, in __is_path_w_addr_exists()
1532 static void free_path(struct rtrs_srv_path *srv_path) in free_path() argument
1534 if (srv_path->kobj.state_in_sysfs) { in free_path()
1535 kobject_del(&srv_path->kobj); in free_path()
1536 kobject_put(&srv_path->kobj); in free_path()
1538 free_percpu(srv_path->stats->rdma_stats); in free_path()
1539 kfree(srv_path->stats); in free_path()
1540 kfree(srv_path); in free_path()
1546 struct rtrs_srv_path *srv_path; in rtrs_srv_close_work() local
1550 srv_path = container_of(work, typeof(*srv_path), close_work); in rtrs_srv_close_work()
1552 rtrs_srv_stop_hb(srv_path); in rtrs_srv_close_work()
1554 for (i = 0; i < srv_path->s.con_num; i++) { in rtrs_srv_close_work()
1555 if (!srv_path->s.con[i]) in rtrs_srv_close_work()
1557 con = to_srv_con(srv_path->s.con[i]); in rtrs_srv_close_work()
1566 percpu_ref_kill(&srv_path->ids_inflight_ref); in rtrs_srv_close_work()
1569 wait_for_completion(&srv_path->complete_done); in rtrs_srv_close_work()
1571 rtrs_srv_destroy_path_files(srv_path); in rtrs_srv_close_work()
1574 rtrs_srv_path_down(srv_path); in rtrs_srv_close_work()
1576 unmap_cont_bufs(srv_path); in rtrs_srv_close_work()
1577 rtrs_srv_free_ops_ids(srv_path); in rtrs_srv_close_work()
1579 for (i = 0; i < srv_path->s.con_num; i++) { in rtrs_srv_close_work()
1580 if (!srv_path->s.con[i]) in rtrs_srv_close_work()
1582 con = to_srv_con(srv_path->s.con[i]); in rtrs_srv_close_work()
1587 rtrs_ib_dev_put(srv_path->s.dev); in rtrs_srv_close_work()
1589 del_path_from_srv(srv_path); in rtrs_srv_close_work()
1590 put_srv(srv_path->srv); in rtrs_srv_close_work()
1591 srv_path->srv = NULL; in rtrs_srv_close_work()
1592 rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED); in rtrs_srv_close_work()
1594 kfree(srv_path->dma_addr); in rtrs_srv_close_work()
1595 kfree(srv_path->s.con); in rtrs_srv_close_work()
1596 free_path(srv_path); in rtrs_srv_close_work()
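The close_work fragments (1546-1596) already fix the teardown order; restoring the control flow gives the skeleton below. The bodies of the two connection loops are not in the listing, so the disconnect/drain and the QP/CQ destruction steps are assumptions stated as comments.

static void rtrs_srv_close_work(struct work_struct *work)
{
        struct rtrs_srv_path *srv_path =
                container_of(work, typeof(*srv_path), close_work);
        struct rtrs_srv_con *con;
        int i;

        rtrs_srv_stop_hb(srv_path);                     /* 1. stop heartbeat */

        for (i = 0; i < srv_path->s.con_num; i++) {
                if (!srv_path->s.con[i])
                        continue;
                con = to_srv_con(srv_path->s.con[i]);
                /* presumably rdma_disconnect()/QP drain per con (elided) */
        }

        /* 2. fence in-flight I/O: drop the initial ref, wait for all puts */
        percpu_ref_kill(&srv_path->ids_inflight_ref);
        wait_for_completion(&srv_path->complete_done);

        rtrs_srv_destroy_path_files(srv_path);          /* 3. sysfs entries */
        rtrs_srv_path_down(srv_path);                   /* 4. link-down event */

        unmap_cont_bufs(srv_path);                      /* 5. MRs, DMA maps */
        rtrs_srv_free_ops_ids(srv_path);

        for (i = 0; i < srv_path->s.con_num; i++) {
                if (!srv_path->s.con[i])
                        continue;
                con = to_srv_con(srv_path->s.con[i]);
                /* presumably QP/CQ destruction and freeing of con (elided) */
        }
        rtrs_ib_dev_put(srv_path->s.dev);

        del_path_from_srv(srv_path);                    /* 6. unlink from sess */
        put_srv(srv_path->srv);
        srv_path->srv = NULL;
        rtrs_srv_change_state(srv_path, RTRS_SRV_CLOSED);

        kfree(srv_path->dma_addr);
        kfree(srv_path->s.con);
        free_path(srv_path);                            /* 7. final kobject put */
}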
1599 static int rtrs_rdma_do_accept(struct rtrs_srv_path *srv_path, in rtrs_rdma_do_accept() argument
1602 struct rtrs_srv_sess *srv = srv_path->srv; in rtrs_rdma_do_accept()
1653 struct rtrs_srv_path *srv_path; in __find_path() local
1655 list_for_each_entry(srv_path, &srv->paths_list, s.entry) { in __find_path()
1656 if (uuid_equal(&srv_path->s.uuid, sess_uuid)) in __find_path()
1657 return srv_path; in __find_path()
1663 static int create_con(struct rtrs_srv_path *srv_path, in create_con() argument
1667 struct rtrs_srv_sess *srv = srv_path->srv; in create_con()
1668 struct rtrs_path *s = &srv_path->s; in create_con()
1683 con->c.path = &srv_path->s; in create_con()
1686 wr_limit = srv_path->s.dev->ib_dev->attrs.max_qp_wr; in create_con()
1713 cq_vector = rtrs_srv_get_next_cq_vector(srv_path); in create_con()
1716 err = rtrs_cq_qp_create(&srv_path->s, &con->c, 1, cq_vector, cq_num, in create_con()
1728 WARN_ON(srv_path->s.con[cid]); in create_con()
1729 srv_path->s.con[cid] = &con->c; in create_con()
1754 struct rtrs_srv_path *srv_path; in __alloc_path() local
1768 srv_path = kzalloc(sizeof(*srv_path), GFP_KERNEL); in __alloc_path()
1769 if (!srv_path) in __alloc_path()
1772 srv_path->stats = kzalloc(sizeof(*srv_path->stats), GFP_KERNEL); in __alloc_path()
1773 if (!srv_path->stats) in __alloc_path()
1776 srv_path->stats->rdma_stats = alloc_percpu(struct rtrs_srv_stats_rdma_stats); in __alloc_path()
1777 if (!srv_path->stats->rdma_stats) in __alloc_path()
1780 srv_path->stats->srv_path = srv_path; in __alloc_path()
1782 srv_path->dma_addr = kcalloc(srv->queue_depth, in __alloc_path()
1783 sizeof(*srv_path->dma_addr), in __alloc_path()
1785 if (!srv_path->dma_addr) in __alloc_path()
1788 srv_path->s.con = kcalloc(con_num, sizeof(*srv_path->s.con), in __alloc_path()
1790 if (!srv_path->s.con) in __alloc_path()
1793 srv_path->state = RTRS_SRV_CONNECTING; in __alloc_path()
1794 srv_path->srv = srv; in __alloc_path()
1795 srv_path->cur_cq_vector = -1; in __alloc_path()
1796 srv_path->s.dst_addr = cm_id->route.addr.dst_addr; in __alloc_path()
1797 srv_path->s.src_addr = cm_id->route.addr.src_addr; in __alloc_path()
1800 path.src = &srv_path->s.src_addr; in __alloc_path()
1801 path.dst = &srv_path->s.dst_addr; in __alloc_path()
1803 strscpy(srv_path->s.sessname, str, sizeof(srv_path->s.sessname)); in __alloc_path()
1805 srv_path->s.con_num = con_num; in __alloc_path()
1806 srv_path->s.irq_con_num = con_num; in __alloc_path()
1807 srv_path->s.recon_cnt = recon_cnt; in __alloc_path()
1808 uuid_copy(&srv_path->s.uuid, uuid); in __alloc_path()
1809 spin_lock_init(&srv_path->state_lock); in __alloc_path()
1810 INIT_WORK(&srv_path->close_work, rtrs_srv_close_work); in __alloc_path()
1811 rtrs_srv_init_hb(srv_path); in __alloc_path()
1813 srv_path->s.dev = rtrs_ib_dev_find_or_add(cm_id->device, &dev_pd); in __alloc_path()
1814 if (!srv_path->s.dev) { in __alloc_path()
1818 err = map_cont_bufs(srv_path); in __alloc_path()
1822 err = rtrs_srv_alloc_ops_ids(srv_path); in __alloc_path()
1826 __add_path_to_srv(srv, srv_path); in __alloc_path()
1828 return srv_path; in __alloc_path()
1831 unmap_cont_bufs(srv_path); in __alloc_path()
1833 rtrs_ib_dev_put(srv_path->s.dev); in __alloc_path()
1835 kfree(srv_path->s.con); in __alloc_path()
1837 kfree(srv_path->dma_addr); in __alloc_path()
1839 free_percpu(srv_path->stats->rdma_stats); in __alloc_path()
1841 kfree(srv_path->stats); in __alloc_path()
1843 kfree(srv_path); in __alloc_path()
1853 struct rtrs_srv_path *srv_path; in rtrs_rdma_connect() local
1894 srv_path = __find_path(srv, &msg->sess_uuid); in rtrs_rdma_connect()
1895 if (srv_path) { in rtrs_rdma_connect()
1896 struct rtrs_path *s = &srv_path->s; in rtrs_rdma_connect()
1901 if (srv_path->state != RTRS_SRV_CONNECTING) { in rtrs_rdma_connect()
1903 rtrs_srv_state_str(srv_path->state)); in rtrs_rdma_connect()
1923 srv_path = __alloc_path(srv, cm_id, con_num, recon_cnt, in rtrs_rdma_connect()
1925 if (IS_ERR(srv_path)) { in rtrs_rdma_connect()
1928 err = PTR_ERR(srv_path); in rtrs_rdma_connect()
1933 err = create_con(srv_path, cm_id, cid); in rtrs_rdma_connect()
1935 rtrs_err((&srv_path->s), "create_con(), error %d\n", err); in rtrs_rdma_connect()
1944 err = rtrs_rdma_do_accept(srv_path, cm_id); in rtrs_rdma_connect()
1946 rtrs_err((&srv_path->s), "rtrs_rdma_do_accept(), error %d\n", err); in rtrs_rdma_connect()
1966 close_path(srv_path); in rtrs_rdma_connect()
1974 struct rtrs_srv_path *srv_path = NULL; in rtrs_srv_rdma_cm_handler() local
1988 srv_path = to_srv_path(s); in rtrs_srv_rdma_cm_handler()
2004 close_path(srv_path); in rtrs_srv_rdma_cm_handler()
2214 struct rtrs_srv_path *srv_path; in close_paths() local
2217 list_for_each_entry(srv_path, &srv->paths_list, s.entry) in close_paths()
2218 close_path(srv_path); in close_paths()