Searched refs:hr_qp (Results 1 – 5 of 5) sorted by relevance
/linux-6.12.1/drivers/infiniband/hw/hns/
hns_roce_qp.c
     46  struct hns_roce_qp *hr_qp = container_of(flush_work,  in flush_work_handle() local
     56  if (test_and_clear_bit(HNS_ROCE_FLUSH_FLAG, &hr_qp->flush_flag)) {  in flush_work_handle()
     57  ret = hns_roce_modify_qp(&hr_qp->ibqp, &attr, attr_mask, NULL);  in flush_work_handle()
     67  if (refcount_dec_and_test(&hr_qp->refcount))  in flush_work_handle()
     68  complete(&hr_qp->free);  in flush_work_handle()
     71  void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp)  in init_flush_work() argument
     73  struct hns_roce_work *flush_work = &hr_qp->flush_work;  in init_flush_work()
     77  refcount_inc(&hr_qp->refcount);  in init_flush_work()
    128  static void hns_roce_ib_qp_event(struct hns_roce_qp *hr_qp,  in hns_roce_ib_qp_event() argument
    131  struct ib_qp *ibqp = &hr_qp->ibqp;  in hns_roce_ib_qp_event()
    [all …]
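The flush_work_handle() / init_flush_work() hits show the driver's flush-work lifetime pattern: init_flush_work() takes an extra QP reference before queuing the work, the handler recovers the QP via container_of(), and the final refcount_dec_and_test() completes hr_qp->free so the destroy path can wait for in-flight flushes. A minimal sketch of that pattern, using a hypothetical demo_qp instead of the real hns_roce_qp (the real driver also serializes re-queuing with the HNS_ROCE_FLUSH_FLAG bit, omitted here):

#include <linux/workqueue.h>
#include <linux/refcount.h>
#include <linux/completion.h>

struct demo_qp {
	struct work_struct flush_work;	/* embedded so container_of() works */
	refcount_t refcount;
	struct completion free;
};

static void demo_flush_handle(struct work_struct *work)
{
	/* Recover the owning QP from the embedded work item. */
	struct demo_qp *qp = container_of(work, struct demo_qp, flush_work);

	/* ... flush the QP, e.g. modify it toward the error state ... */

	/* Drop the reference taken at queue time; wake the destroy path. */
	if (refcount_dec_and_test(&qp->refcount))
		complete(&qp->free);
}

static void demo_init_flush_work(struct demo_qp *qp)
{
	INIT_WORK(&qp->flush_work, demo_flush_handle);
	refcount_inc(&qp->refcount);	/* pin the QP for the worker */
	queue_work(system_wq, &qp->flush_work);
}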
hns_roce_restrack.c
     64  struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);  in hns_roce_fill_res_qp_entry() local
     71  if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))  in hns_roce_fill_res_qp_entry()
     74  if (rdma_nl_put_driver_u32_hex(msg, "sq_max_gs", hr_qp->sq.max_gs))  in hns_roce_fill_res_qp_entry()
     77  if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))  in hns_roce_fill_res_qp_entry()
     80  if (rdma_nl_put_driver_u32_hex(msg, "rq_max_gs", hr_qp->rq.max_gs))  in hns_roce_fill_res_qp_entry()
     83  if (rdma_nl_put_driver_u32_hex(msg, "ext_sge_sge_cnt", hr_qp->sge.sge_cnt))  in hns_roce_fill_res_qp_entry()
     99  struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);  in hns_roce_fill_res_qp_entry_raw() local
    109  ret = hr_dev->hw->query_qpc(hr_dev, hr_qp->qpn, &context.qpc);  in hns_roce_fill_res_qp_entry_raw()
    120  ret = hr_dev->hw->query_sccc(hr_dev, hr_qp->qpn, &context.sccc);  in hns_roce_fill_res_qp_entry_raw()
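hns_roce_fill_res_qp_entry() follows the usual restrack fill pattern: each driver-specific value is emitted as a netlink attribute with rdma_nl_put_driver_u32_hex(), and the first failed put aborts the message. A condensed sketch, assuming the driver's to_hr_qp() helper and headers are in scope (returning -EMSGSIZE on failure is the convention for these fills):

static int demo_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ib_qp)
{
	struct hns_roce_qp *hr_qp = to_hr_qp(ib_qp);

	/* Each put fails when the skb runs out of room; bail out early
	 * rather than emit a partially filled entry. */
	if (rdma_nl_put_driver_u32_hex(msg, "sq_wqe_cnt", hr_qp->sq.wqe_cnt))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "rq_wqe_cnt", hr_qp->rq.wqe_cnt))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}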
hns_roce_hw_v2.c
    374  struct hns_roce_qp *hr_qp)  in check_send_valid() argument
    378  if (unlikely(hr_qp->state == IB_QPS_RESET ||  in check_send_valid()
    379  hr_qp->state == IB_QPS_INIT ||  in check_send_valid()
    380  hr_qp->state == IB_QPS_RTR)) {  in check_send_valid()
    382  hr_qp->state);  in check_send_valid()
    768  struct hns_roce_qp *hr_qp)  in check_recv_valid() argument
    773  if (hr_qp->state == IB_QPS_RESET)  in check_recv_valid()
    806  static void fill_rq_wqe(struct hns_roce_qp *hr_qp, const struct ib_recv_wr *wr,  in fill_rq_wqe() argument
    811  wqe = hns_roce_get_recv_wqe(hr_qp, wqe_idx);  in fill_rq_wqe()
    812  fill_recv_sge_to_wqe(wr, wqe, max_sge, hr_qp->rq.rsv_sge);  in fill_rq_wqe()
    [all …]
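check_send_valid() and check_recv_valid() gate the post paths on QP state: sends are rejected until the QP has left RESET/INIT/RTR (the send queue only becomes usable in RTS), while receives are blocked only in RESET, since RQ WQEs may be posted from INIT onward. A trimmed sketch of the send-side check, assuming the driver headers for struct hns_roce_qp:

#include <rdma/ib_verbs.h>	/* IB_QPS_* states */

static int demo_check_send_valid(struct hns_roce_qp *hr_qp)
{
	/* Posting a send WR is only legal once the QP is send-capable. */
	if (unlikely(hr_qp->state == IB_QPS_RESET ||
		     hr_qp->state == IB_QPS_INIT ||
		     hr_qp->state == IB_QPS_RTR))
		return -EINVAL;

	return 0;
}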
hns_roce_main.c
   1024  struct hns_roce_qp *hr_qp;  in hns_roce_handle_device_err() local
   1033  list_for_each_entry(hr_qp, &hr_dev->qp_list, node) {  in hns_roce_handle_device_err()
   1034  spin_lock_irqsave(&hr_qp->sq.lock, flags_qp);  in hns_roce_handle_device_err()
   1035  if (hr_qp->sq.tail != hr_qp->sq.head)  in hns_roce_handle_device_err()
   1036  check_and_get_armed_cq(&cq_list, hr_qp->ibqp.send_cq);  in hns_roce_handle_device_err()
   1037  spin_unlock_irqrestore(&hr_qp->sq.lock, flags_qp);  in hns_roce_handle_device_err()
   1039  spin_lock_irqsave(&hr_qp->rq.lock, flags_qp);  in hns_roce_handle_device_err()
   1040  if ((!hr_qp->ibqp.srq) && (hr_qp->rq.tail != hr_qp->rq.head))  in hns_roce_handle_device_err()
   1041  check_and_get_armed_cq(&cq_list, hr_qp->ibqp.recv_cq);  in hns_roce_handle_device_err()
   1042  spin_unlock_irqrestore(&hr_qp->rq.lock, flags_qp);  in hns_roce_handle_device_err()
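hns_roce_handle_device_err() walks the device's qp_list and, under each queue's spinlock, uses the head/tail ring indices to spot queues with unconsumed WQEs; the CQs of those queues are collected into cq_list for flushing. A hypothetical sketch of the collection step (demo_cq and demo_collect_cq are illustrative names, and the real check_and_get_armed_cq() also arms the CQ for flushing):

#include <linux/list.h>

struct demo_cq {
	struct list_head node;	/* links the CQ into the flush list */
};

static void demo_collect_cq(struct list_head *cq_list, struct demo_cq *cq)
{
	struct demo_cq *pos;

	/* A QP's send and recv CQ may be the same object, so dedupe. */
	list_for_each_entry(pos, cq_list, node)
		if (pos == cq)
			return;

	list_add_tail(&cq->node, cq_list);
}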
hns_roce_device.h
    952  struct hns_roce_qp *hr_qp);
   1260  void init_flush_work(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
   1261  void *hns_roce_get_recv_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
   1262  void *hns_roce_get_send_wqe(struct hns_roce_qp *hr_qp, unsigned int n);
   1263  void *hns_roce_get_extend_sge(struct hns_roce_qp *hr_qp, unsigned int n);
   1270  void hns_roce_qp_remove(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp);
   1271  void hns_roce_qp_destroy(struct hns_roce_dev *hr_dev, struct hns_roce_qp *hr_qp,
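These declarations include the WQE accessors used by the post paths above (hns_roce_get_recv_wqe() is what fill_rq_wqe() calls). Such an accessor conventionally masks the index into the power-of-two ring and scales by the WQE stride; a hedged sketch with illustrative names (demo_get_wqe, wqe_cnt, wqe_shift), not the driver's exact implementation:

/* buf: start of the queue buffer; n: raw ring index;
 * wqe_cnt: ring depth (power of two); wqe_shift: log2(WQE size). */
static inline void *demo_get_wqe(void *buf, unsigned int n,
				 unsigned int wqe_cnt, unsigned int wqe_shift)
{
	return (char *)buf + ((n & (wqe_cnt - 1)) << wqe_shift);
}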