Lines matching full:con — full-text hits for the identifier "con" in the RTRS client (drivers/infiniband/ulp/rtrs/rtrs-clt.c). Each entry gives the source line number, the matching line, and the enclosing function; "argument" and "local" mark the kind of declaration on that line.
188 return to_clt_con(clt_path->s.con[id]); in rtrs_permit_to_clt_con()
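The hit at 188 is the tail of the permit-to-connection lookup. A minimal sketch of the surrounding helper, reconstructed around that line — the index formula and the con_type/cpu_id fields are assumptions, inferred from the way the rest of the listing reserves s.con[0] for the service connection:

```c
/*
 * Sketch (assumption, reconstructed around line 188): pick the
 * connection an I/O permit should use. con[0] is the user/service
 * connection, so I/O permits hash onto the remaining connections by
 * the CPU that allocated the permit.
 */
static struct rtrs_clt_con *
rtrs_permit_to_clt_con(struct rtrs_clt_path *clt_path,
		       struct rtrs_permit *permit)
{
	int id = 0;

	if (permit->con_type == RTRS_IO_CON)
		id = (permit->cpu_id % (clt_path->s.con_num - 1)) + 1;

	return to_clt_con(clt_path->s.con[id]);
}
```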
307 static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con) in rtrs_rdma_error_recovery() argument
309 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_error_recovery()
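Lines 307–309 show only the entry of the error-recovery path that almost every failure below funnels into. A hedged sketch of its likely shape; the state-change helper, the rtrs_wq workqueue, and the err_recovery_work member are assumptions not visible in the listing:

```c
/* Sketch (assumption): funnel all RDMA errors into one reconnect path. */
static void rtrs_rdma_error_recovery(struct rtrs_clt_con *con)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);

	if (rtrs_clt_change_state_from_to(clt_path, RTRS_CLT_CONNECTED,
					  RTRS_CLT_RECONNECTING)) {
		/* An established path died: hand off to the reconnect worker. */
		queue_work(rtrs_wq, &clt_path->err_recovery_work);
	} else {
		/*
		 * Failure while still connecting: flag the error so the
		 * waiter in the connect path cleans up and retries.
		 */
		rtrs_clt_change_state_from_to(clt_path, RTRS_CLT_CONNECTING,
					      RTRS_CLT_CONNECTING_ERR);
	}
}
```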
331 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_fast_reg_done() local
334 rtrs_err_rl(con->c.path, "Failed IB_WR_REG_MR: %s\n", in rtrs_clt_fast_reg_done()
336 rtrs_rdma_error_recovery(con); in rtrs_clt_fast_reg_done()
351 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_inv_rkey_done() local
354 rtrs_err_rl(con->c.path, "Failed IB_WR_LOCAL_INV: %s\n", in rtrs_clt_inv_rkey_done()
356 rtrs_rdma_error_recovery(con); in rtrs_clt_inv_rkey_done()
368 struct rtrs_clt_con *con = req->con; in rtrs_inv_rkey() local
377 return ib_post_send(con->c.qp, &wr, NULL); in rtrs_inv_rkey()
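Lines 368–377 are the local-invalidate fallback: when the server did not invalidate the rkey remotely, the client posts an IB_WR_LOCAL_INV itself. A sketch assuming the request carries its own MR and a dedicated CQE (req->mr and req->inv_cqe are assumptions; the qp access and the post call are taken from the hits above):

```c
static int rtrs_inv_rkey(struct rtrs_clt_io_req *req)
{
	struct rtrs_clt_con *con = req->con;
	struct ib_send_wr wr = {
		.opcode		    = IB_WR_LOCAL_INV,
		.wr_cqe		    = &req->inv_cqe,	/* assumption */
		.send_flags	    = IB_SEND_SIGNALED,
		.ex.invalidate_rkey = req->mr->rkey,	/* assumption */
	};

	req->inv_cqe.done = rtrs_clt_inv_rkey_done;

	return ib_post_send(con->c.qp, &wr, NULL);
}
```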
383 struct rtrs_clt_con *con = req->con; in complete_rdma_req() local
389 if (WARN_ON(!req->con)) in complete_rdma_req()
391 clt_path = to_clt_path(con->c.path); in complete_rdma_req()
425 rtrs_err_rl(con->c.path, "Send INV WR key=%#x: %d\n", in complete_rdma_req()
442 req->con = NULL; in complete_rdma_req()
445 rtrs_err_rl(con->c.path, in complete_rdma_req()
456 static int rtrs_post_send_rdma(struct rtrs_clt_con *con, in rtrs_post_send_rdma() argument
461 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_post_send_rdma()
466 rtrs_wrn(con->c.path, in rtrs_post_send_rdma()
480 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? in rtrs_post_send_rdma()
487 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, &sge, 1, in rtrs_post_send_rdma()
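Line 480 is the selective-signaling policy shared by both send paths (it reappears verbatim at 1055): only every signal_interval-th work request asks for a send completion, so the CQ is not flooded on the fast path. The line again, with comments added:

```c
	/*
	 * Request a send completion only once per signal_interval posts;
	 * the unsignaled WRs in between are reaped in bulk when the
	 * signaled one completes. atomic_inc_return() keeps the counter
	 * safe against posts from several contexts on one connection.
	 */
	flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ?
		0 : IB_SEND_SIGNALED;
```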
506 static void rtrs_clt_recv_done(struct rtrs_clt_con *con, struct ib_wc *wc) in rtrs_clt_recv_done() argument
510 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_recv_done()
515 err = rtrs_iu_post_recv(&con->c, iu); in rtrs_clt_recv_done()
517 rtrs_err(con->c.path, "post iu failed %d\n", err); in rtrs_clt_recv_done()
518 rtrs_rdma_error_recovery(con); in rtrs_clt_recv_done()
522 static void rtrs_clt_rkey_rsp_done(struct rtrs_clt_con *con, struct ib_wc *wc) in rtrs_clt_rkey_rsp_done() argument
524 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_rkey_rsp_done()
537 rtrs_err(con->c.path, "rkey response is malformed: size %d\n", in rtrs_clt_rkey_rsp_done()
569 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rkey_rsp_done()
571 rtrs_rdma_error_recovery(con); in rtrs_clt_rkey_rsp_done()
584 static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe) in rtrs_post_recv_empty_x2() argument
598 return ib_post_recv(con->qp, wr, NULL); in rtrs_post_recv_empty_x2()
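Lines 584–598 post two zero-length receive WRs with a single verb, presumably because a heartbeat exchange consumes two receive credits (message plus ack) that must be replenished together. A sketch, assuming the WRs are chained so one ib_post_recv() covers both:

```c
static int rtrs_post_recv_empty_x2(struct rtrs_con *con, struct ib_cqe *cqe)
{
	struct ib_recv_wr wr_arr[2], *wr;
	int i;

	memset(wr_arr, 0, sizeof(wr_arr));
	for (i = 0; i < ARRAY_SIZE(wr_arr); i++) {
		wr = &wr_arr[i];
		wr->wr_cqe = cqe;
		if (i)
			/* Chain backwards: the last element is posted first. */
			wr->next = &wr_arr[i - 1];
	}

	return ib_post_recv(con->qp, wr, NULL);
}
```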
603 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_rdma_done() local
604 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_rdma_done()
613 rtrs_rdma_error_recovery(con); in rtrs_clt_rdma_done()
617 rtrs_clt_update_wc_stats(con); in rtrs_clt_rdma_done()
639 WARN_ON(con->c.cid); in rtrs_clt_rdma_done()
642 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rdma_done()
644 WARN_ON(con->c.cid); in rtrs_clt_rdma_done()
648 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rdma_done()
650 rtrs_wrn(con->c.path, "Unknown IMM type %u\n", in rtrs_clt_rdma_done()
658 err = rtrs_post_recv_empty_x2(&con->c, &io_comp_cqe); in rtrs_clt_rdma_done()
660 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); in rtrs_clt_rdma_done()
662 rtrs_err(con->c.path, "rtrs_post_recv_empty(): %d\n", in rtrs_clt_rdma_done()
664 rtrs_rdma_error_recovery(con); in rtrs_clt_rdma_done()
677 return rtrs_clt_recv_done(con, wc); in rtrs_clt_rdma_done()
679 return rtrs_clt_rkey_rsp_done(con, wc); in rtrs_clt_rdma_done()
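The rtrs_clt_rdma_done() hits (603–679) all hang off one decode of the RDMA immediate data. A hedged sketch of that dispatch — rtrs_from_imm(), rtrs_from_io_rsp_imm(), process_io_rsp() and the IMM type names are assumptions about the rtrs wire format; the listing itself only shows the WARN_ON, error, and recv plumbing:

```c
	u32 imm_type, imm_payload;

	rtrs_from_imm(be32_to_cpu(wc->ex.imm_data), &imm_type, &imm_payload);

	if (imm_type == RTRS_IO_RSP_IMM || imm_type == RTRS_IO_RSP_W_INV_IMM) {
		u32 msg_id;
		int err;

		/* Payload encodes which request completed and its status. */
		rtrs_from_io_rsp_imm(imm_payload, &msg_id, &err);
		process_io_rsp(clt_path, msg_id, err,
			       imm_type == RTRS_IO_RSP_W_INV_IMM);
	} else if (imm_type == RTRS_HB_MSG_IMM) {
		WARN_ON(con->c.cid);	/* heartbeats ride con[0] only (line 639) */
		rtrs_send_hb_ack(&clt_path->s);
	} else if (imm_type == RTRS_HB_ACK_IMM) {
		WARN_ON(con->c.cid);	/* line 644 */
		clt_path->s.hb_missed_cnt = 0;
	} else {
		rtrs_wrn(con->c.path, "Unknown IMM type %u\n", imm_type);
	}
```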
695 static int post_recv_io(struct rtrs_clt_con *con, size_t q_size) in post_recv_io() argument
698 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in post_recv_io()
702 struct rtrs_iu *iu = &con->rsp_ius[i]; in post_recv_io()
704 err = rtrs_iu_post_recv(&con->c, iu); in post_recv_io()
706 err = rtrs_post_recv_empty(&con->c, &io_comp_cqe); in post_recv_io()
732 err = post_recv_io(to_clt_con(clt_path->s.con[cid]), q_size); in post_recv_path()
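Lines 695–706 pre-post one receive per expected response before the path is put to work, and 732 shows post_recv_path() doing this once per connection. A sketch of the loop, assuming RTRS_MSG_NEW_RKEY_F selects between real IUs (responses carry fresh rkeys) and empty recv WRs (pure IMM completions):

```c
static int post_recv_io(struct rtrs_clt_con *con, size_t q_size)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	size_t i;
	int err;

	for (i = 0; i < q_size; i++) {
		if (clt_path->flags & RTRS_MSG_NEW_RKEY_F) {
			/* Responses arrive in IUs carrying new rkeys. */
			struct rtrs_iu *iu = &con->rsp_ius[i];

			err = rtrs_iu_post_recv(&con->c, iu);
		} else {
			/* Pure IMM completions need only empty recv WRs. */
			err = rtrs_post_recv_empty(&con->c, &io_comp_cqe);
		}
		if (err)
			return err;
	}

	return 0;
}
```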
967 req->con = rtrs_permit_to_clt_con(clt_path, permit); in rtrs_clt_init_req()
1016 static int rtrs_post_rdma_write_sg(struct rtrs_clt_con *con, in rtrs_post_rdma_write_sg() argument
1023 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_post_rdma_write_sg()
1055 flags = atomic_inc_return(&con->c.wr_cnt) % clt_path->s.signal_interval ? in rtrs_post_rdma_write_sg()
1062 return rtrs_iu_post_rdma_write_imm(&con->c, req->iu, sge, num_sge, in rtrs_post_rdma_write_sg()
1082 struct rtrs_clt_con *con = req->con; in rtrs_clt_write_req() local
1083 struct rtrs_path *s = con->c.path; in rtrs_clt_write_req()
1148 ret = rtrs_post_rdma_write_sg(req->con, req, rbuf, fr_en, count, in rtrs_clt_write_req()
1172 struct rtrs_clt_con *con = req->con; in rtrs_clt_read_req() local
1173 struct rtrs_path *s = con->c.path; in rtrs_clt_read_req()
1259 ret = rtrs_post_send_rdma(req->con, req, &clt_path->rbufs[buf_id], in rtrs_clt_read_req()
1491 struct rtrs_clt_con *con = container_of(c, typeof(*con), c); in rtrs_clt_hb_err_handler() local
1492 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_hb_err_handler()
1494 rtrs_err(con->c.path, "HB err handler for path=%s\n", kobject_name(&clt_path->kobj)); in rtrs_clt_hb_err_handler()
1495 rtrs_rdma_error_recovery(con); in rtrs_clt_hb_err_handler()
1543 clt_path->s.con = kcalloc(total_con, sizeof(*clt_path->s.con), in alloc_path()
1545 if (!clt_path->s.con) in alloc_path()
1598 kfree(clt_path->s.con); in alloc_path()
1609 kfree(clt_path->s.con); in free_path()
1616 struct rtrs_clt_con *con; in create_con() local
1618 con = kzalloc(sizeof(*con), GFP_KERNEL); in create_con()
1619 if (!con) in create_con()
1623 con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids; in create_con()
1624 con->c.cid = cid; in create_con()
1625 con->c.path = &clt_path->s; in create_con()
1627 atomic_set(&con->c.wr_cnt, 1); in create_con()
1628 mutex_init(&con->con_mutex); in create_con()
1630 clt_path->s.con[cid] = &con->c; in create_con()
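create_con() (1616–1630) is shown almost in full; filling in the two elided lines gives a compact picture of per-connection setup. Only the -ENOMEM return and the comments are added here, as assumptions:

```c
static int create_con(struct rtrs_clt_path *clt_path, unsigned int cid)
{
	struct rtrs_clt_con *con;

	con = kzalloc(sizeof(*con), GFP_KERNEL);
	if (!con)
		return -ENOMEM;

	/* cid 0 is the service connection; I/O cids map onto CPUs. */
	con->cpu = (cid ? cid - 1 : 0) % nr_cpu_ids;
	con->c.cid = cid;
	con->c.path = &clt_path->s;
	/* Account for the first, always-signaled WR up front. */
	atomic_set(&con->c.wr_cnt, 1);
	mutex_init(&con->con_mutex);

	clt_path->s.con[cid] = &con->c;

	return 0;
}
```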
1635 static void destroy_con(struct rtrs_clt_con *con) in destroy_con() argument
1637 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in destroy_con()
1639 clt_path->s.con[con->c.cid] = NULL; in destroy_con()
1640 mutex_destroy(&con->con_mutex); in destroy_con()
1641 kfree(con); in destroy_con()
1644 static int create_con_cq_qp(struct rtrs_clt_con *con) in create_con_cq_qp() argument
1646 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in create_con_cq_qp()
1651 lockdep_assert_held(&con->con_mutex); in create_con_cq_qp()
1652 if (con->c.cid == 0) { in create_con_cq_qp()
1663 clt_path->s.dev = rtrs_ib_dev_find_or_add(con->c.cm_id->device, in create_con_cq_qp()
1704 atomic_set(&con->c.sq_wr_avail, max_send_wr); in create_con_cq_qp()
1707 if (clt_path->flags & RTRS_MSG_NEW_RKEY_F || con->c.cid == 0) { in create_con_cq_qp()
1708 con->rsp_ius = rtrs_iu_alloc(cq_num, sizeof(*rsp), in create_con_cq_qp()
1713 if (!con->rsp_ius) in create_con_cq_qp()
1715 con->queue_num = cq_num; in create_con_cq_qp()
1717 cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors; in create_con_cq_qp()
1718 if (con->c.cid >= clt_path->s.irq_con_num) in create_con_cq_qp()
1719 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, in create_con_cq_qp()
1723 err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge, in create_con_cq_qp()
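Lines 1717–1723 pick the completion vector and, per connection id, the CQ polling mode. Connections at or beyond irq_con_num appear to be the ones reaped directly by rtrs_clt_rdma_cq_direct() (line 3063), so they plausibly get IB_POLL_DIRECT rather than softirq completions. In context — the trailing arguments, truncated in the listing, are assumptions:

```c
	cq_vector = con->cpu % clt_path->s.dev->ib_dev->num_comp_vectors;
	if (con->c.cid >= clt_path->s.irq_con_num)
		/* Polled by the block layer via ib_process_cq_direct(). */
		err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
					cq_vector, cq_num, max_send_wr,
					max_recv_wr, IB_POLL_DIRECT);
	else
		/* Interrupt-driven completions. */
		err = rtrs_cq_qp_create(&clt_path->s, &con->c, max_send_sge,
					cq_vector, cq_num, max_send_wr,
					max_recv_wr, IB_POLL_SOFTIRQ);
```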
1733 static void destroy_con_cq_qp(struct rtrs_clt_con *con) in destroy_con_cq_qp() argument
1735 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in destroy_con_cq_qp()
1741 lockdep_assert_held(&con->con_mutex); in destroy_con_cq_qp()
1742 rtrs_cq_qp_destroy(&con->c); in destroy_con_cq_qp()
1743 if (con->rsp_ius) { in destroy_con_cq_qp()
1744 rtrs_iu_free(con->rsp_ius, clt_path->s.dev->ib_dev, in destroy_con_cq_qp()
1745 con->queue_num); in destroy_con_cq_qp()
1746 con->rsp_ius = NULL; in destroy_con_cq_qp()
1747 con->queue_num = 0; in destroy_con_cq_qp()
1755 static void stop_cm(struct rtrs_clt_con *con) in stop_cm() argument
1757 rdma_disconnect(con->c.cm_id); in stop_cm()
1758 if (con->c.qp) in stop_cm()
1759 ib_drain_qp(con->c.qp); in stop_cm()
1762 static void destroy_cm(struct rtrs_clt_con *con) in destroy_cm() argument
1764 rdma_destroy_id(con->c.cm_id); in destroy_cm()
1765 con->c.cm_id = NULL; in destroy_cm()
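stop_cm()/destroy_cm() (1755–1765) split connection teardown in two: first break the connection and flush the QP, then free the CM id. The pair in full, with only the comments added:

```c
static void stop_cm(struct rtrs_clt_con *con)
{
	/* Tell the peer we are going away ... */
	rdma_disconnect(con->c.cm_id);
	/* ... and wait until every posted WR has completed or flushed. */
	if (con->c.qp)
		ib_drain_qp(con->c.qp);
}

static void destroy_cm(struct rtrs_clt_con *con)
{
	rdma_destroy_id(con->c.cm_id);
	con->c.cm_id = NULL;
}
```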
1768 static int rtrs_rdma_addr_resolved(struct rtrs_clt_con *con) in rtrs_rdma_addr_resolved() argument
1770 struct rtrs_path *s = con->c.path; in rtrs_rdma_addr_resolved()
1773 mutex_lock(&con->con_mutex); in rtrs_rdma_addr_resolved()
1774 err = create_con_cq_qp(con); in rtrs_rdma_addr_resolved()
1775 mutex_unlock(&con->con_mutex); in rtrs_rdma_addr_resolved()
1780 err = rdma_resolve_route(con->c.cm_id, RTRS_CONNECT_TIMEOUT_MS); in rtrs_rdma_addr_resolved()
1787 static int rtrs_rdma_route_resolved(struct rtrs_clt_con *con) in rtrs_rdma_route_resolved() argument
1789 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_route_resolved()
1806 .cid = cpu_to_le16(con->c.cid), in rtrs_rdma_route_resolved()
1814 err = rdma_connect_locked(con->c.cm_id, &param); in rtrs_rdma_route_resolved()
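rtrs_rdma_route_resolved() (1787–1814) sends the rtrs connection request as CM private data; line 1806 shows the connection id going into that message. A sketch of the interesting part — every message field besides cid, and the retry counts, are assumptions about the wire format:

```c
	struct rtrs_msg_conn_req msg = {
		.magic	 = cpu_to_le16(RTRS_MAGIC),		/* assumption */
		.version = cpu_to_le16(RTRS_PROTO_VER),		/* assumption */
		.cid	 = cpu_to_le16(con->c.cid),		/* line 1806 */
		.cid_num = cpu_to_le16(clt_path->s.con_num),	/* assumption */
	};
	struct rdma_conn_param param = {
		.retry_count	  = 7,
		.rnr_retry_count  = 7,
		.private_data	  = &msg,
		.private_data_len = sizeof(msg),
	};
	int err;

	/* _locked variant: we are called from the CM event handler itself. */
	err = rdma_connect_locked(con->c.cm_id, &param);
```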
1821 static int rtrs_rdma_conn_established(struct rtrs_clt_con *con, in rtrs_rdma_conn_established() argument
1824 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_rdma_conn_established()
1853 if (con->c.cid == 0) { in rtrs_rdma_conn_established()
1900 clt_path->hca_port = con->c.cm_id->port_num; in rtrs_rdma_conn_established()
1903 clt_path->s.src_addr = con->c.cm_id->route.addr.src_addr; in rtrs_rdma_conn_established()
1911 static inline void flag_success_on_conn(struct rtrs_clt_con *con) in flag_success_on_conn() argument
1913 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in flag_success_on_conn()
1916 con->cm_err = 1; in flag_success_on_conn()
1919 static int rtrs_rdma_conn_rejected(struct rtrs_clt_con *con, in rtrs_rdma_conn_rejected() argument
1922 struct rtrs_path *s = con->c.path; in rtrs_rdma_conn_rejected()
1929 rej_msg = rdma_reject_msg(con->c.cm_id, status); in rtrs_rdma_conn_rejected()
1930 msg = rdma_consumer_reject_data(con->c.cm_id, ev, &data_len); in rtrs_rdma_conn_rejected()
1960 static inline void flag_error_on_conn(struct rtrs_clt_con *con, int cm_err) in flag_error_on_conn() argument
1962 if (con->cm_err == 1) { in flag_error_on_conn()
1965 clt_path = to_clt_path(con->c.path); in flag_error_on_conn()
1970 con->cm_err = cm_err; in flag_error_on_conn()
1976 struct rtrs_clt_con *con = cm_id->context; in rtrs_clt_rdma_cm_handler() local
1977 struct rtrs_path *s = con->c.path; in rtrs_clt_rdma_cm_handler()
1983 cm_err = rtrs_rdma_addr_resolved(con); in rtrs_clt_rdma_cm_handler()
1986 cm_err = rtrs_rdma_route_resolved(con); in rtrs_clt_rdma_cm_handler()
1989 cm_err = rtrs_rdma_conn_established(con, ev); in rtrs_clt_rdma_cm_handler()
1995 flag_success_on_conn(con); in rtrs_clt_rdma_cm_handler()
2001 cm_err = rtrs_rdma_conn_rejected(con, ev); in rtrs_clt_rdma_cm_handler()
2041 flag_error_on_conn(con, cm_err); in rtrs_clt_rdma_cm_handler()
2042 rtrs_rdma_error_recovery(con); in rtrs_clt_rdma_cm_handler()
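The CM handler hits (1976–2042) sketch out the whole client connect state machine: each per-event helper returns a cm_err which, when set, is flagged on the connection and triggers recovery. Roughly, with the event list abridged to the cases visible above and the default case an assumption:

```c
static int rtrs_clt_rdma_cm_handler(struct rdma_cm_id *cm_id,
				    struct rdma_cm_event *ev)
{
	struct rtrs_clt_con *con = cm_id->context;
	int cm_err = 0;

	switch (ev->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
		cm_err = rtrs_rdma_addr_resolved(con);
		break;
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		cm_err = rtrs_rdma_route_resolved(con);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		cm_err = rtrs_rdma_conn_established(con, ev);
		if (!cm_err)
			/* Report success to the waiter in create_cm(). */
			flag_success_on_conn(con);
		break;
	case RDMA_CM_EVENT_REJECTED:
		cm_err = rtrs_rdma_conn_rejected(con, ev);
		break;
	default:
		cm_err = -ECONNRESET;	/* assumption for unlisted events */
		break;
	}
	if (cm_err) {
		flag_error_on_conn(con, cm_err);
		rtrs_rdma_error_recovery(con);
	}

	return 0;
}
```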
2049 static int create_cm(struct rtrs_clt_con *con) in create_cm() argument
2051 struct rtrs_path *s = con->c.path; in create_cm()
2056 cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con, in create_cm()
2063 con->c.cm_id = cm_id; in create_cm()
2064 con->cm_err = 0; in create_cm()
2085 con->cm_err || clt_path->state != RTRS_CLT_CONNECTING, in create_cm()
2093 if (con->cm_err < 0) in create_cm()
2094 return con->cm_err; in create_cm()
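create_cm() (2049–2094) creates the CM id with the connection as its context, kicks off address resolution, then sleeps until the handler above reports success (cm_err becomes 1 via flag_success_on_conn(), line 1916) or failure (cm_err < 0). A trimmed sketch; the port space choice, address arguments, and timeout handling are assumptions:

```c
static int create_cm(struct rtrs_clt_con *con)
{
	struct rtrs_clt_path *clt_path = to_clt_path(con->c.path);
	struct rdma_cm_id *cm_id;
	int err;

	cm_id = rdma_create_id(&init_net, rtrs_clt_rdma_cm_handler, con,
			       RDMA_PS_TCP, IB_QPT_RC);	/* assumption */
	if (IS_ERR(cm_id))
		return PTR_ERR(cm_id);

	con->c.cm_id = cm_id;
	con->cm_err = 0;

	err = rdma_resolve_addr(cm_id,
				(struct sockaddr *)&clt_path->s.src_addr,
				(struct sockaddr *)&clt_path->s.dst_addr,
				RTRS_CONNECT_TIMEOUT_MS);
	if (err)
		return err;

	/* The CM handler drives the rest; wait for its verdict (line 2085). */
	wait_event_interruptible_timeout(clt_path->state_wq,
			con->cm_err || clt_path->state != RTRS_CLT_CONNECTING,
			msecs_to_jiffies(RTRS_CONNECT_TIMEOUT_MS));
	if (con->cm_err < 0)
		return con->cm_err;

	return 0;
}
```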
2150 struct rtrs_clt_con *con; in rtrs_clt_stop_and_destroy_conns() local
2178 if (!clt_path->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2180 con = to_clt_con(clt_path->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2181 stop_cm(con); in rtrs_clt_stop_and_destroy_conns()
2200 if (!clt_path->s.con[cid]) in rtrs_clt_stop_and_destroy_conns()
2202 con = to_clt_con(clt_path->s.con[cid]); in rtrs_clt_stop_and_destroy_conns()
2203 mutex_lock(&con->con_mutex); in rtrs_clt_stop_and_destroy_conns()
2204 destroy_con_cq_qp(con); in rtrs_clt_stop_and_destroy_conns()
2205 mutex_unlock(&con->con_mutex); in rtrs_clt_stop_and_destroy_conns()
2206 destroy_cm(con); in rtrs_clt_stop_and_destroy_conns()
2207 destroy_con(con); in rtrs_clt_stop_and_destroy_conns()
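The teardown hits (2150–2207) reveal a two-pass structure: every connection is first disconnected and drained, and only then are QPs, CM ids, and the connection objects destroyed, so no completion can race with destruction. Sketched from the loop bodies above; the loop bound and early break are assumptions (connections are created in order):

```c
	/* Pass 1: break all connections and drain their QPs ... */
	for (cid = 0; cid < clt_path->s.con_num; cid++) {
		if (!clt_path->s.con[cid])
			break;
		con = to_clt_con(clt_path->s.con[cid]);
		stop_cm(con);
	}

	/* ... pass 2: with all QPs drained, destruction is race-free. */
	for (cid = 0; cid < clt_path->s.con_num; cid++) {
		if (!clt_path->s.con[cid])
			break;
		con = to_clt_con(clt_path->s.con[cid]);
		mutex_lock(&con->con_mutex);
		destroy_con_cq_qp(con);
		mutex_unlock(&con->con_mutex);
		destroy_cm(con);
		destroy_con(con);
	}
```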
2342 err = create_cm(to_clt_con(clt_path->s.con[cid])); in init_conns()
2361 struct rtrs_clt_con *con; in init_conns() local
2363 if (!clt_path->s.con[i]) in init_conns()
2366 con = to_clt_con(clt_path->s.con[i]); in init_conns()
2367 if (con->c.cm_id) { in init_conns()
2368 stop_cm(con); in init_conns()
2369 mutex_lock(&con->con_mutex); in init_conns()
2370 destroy_con_cq_qp(con); in init_conns()
2371 mutex_unlock(&con->con_mutex); in init_conns()
2372 destroy_cm(con); in init_conns()
2374 destroy_con(con); in init_conns()
2388 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_info_req_done() local
2389 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_info_req_done()
2402 rtrs_clt_update_wc_stats(con); in rtrs_clt_info_req_done()
2472 struct rtrs_clt_con *con = to_clt_con(wc->qp->qp_context); in rtrs_clt_info_rsp_done() local
2473 struct rtrs_clt_path *clt_path = to_clt_path(con->c.path); in rtrs_clt_info_rsp_done()
2482 WARN_ON(con->c.cid); in rtrs_clt_info_rsp_done()
2522 rtrs_clt_update_wc_stats(con); in rtrs_clt_info_rsp_done()
2529 struct rtrs_clt_con *usr_con = to_clt_con(clt_path->s.con[0]); in rtrs_send_path_info()
3052 struct rtrs_con *con; in rtrs_clt_rdma_cq_direct() local
3062 con = clt_path->s.con[index + 1]; in rtrs_clt_rdma_cq_direct()
3063 cnt = ib_process_cq_direct(con->cq, -1); in rtrs_clt_rdma_cq_direct()
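The final hits (3052–3063) are the block-layer poll hook: hybrid polling maps a hardware-queue index to its I/O connection (index + 1, skipping the service connection at con[0]) and reaps that CQ synchronously. The two lines again, with comments added:

```c
	/* index is the blk-mq hw queue; con[0] is the service connection. */
	con = clt_path->s.con[index + 1];
	/* Reap whatever completions are ready; -1 means no budget limit. */
	cnt = ib_process_cq_direct(con->cq, -1);
```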