Home
last modified time | relevance | path

Searched refs:hr_cq (Results 1 – 6 of 6) sorted by relevance

/linux-6.12.1/drivers/infiniband/hw/hns/
hns_roce_cq.c:58  static int alloc_cqn(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) in alloc_cqn() argument
76 hr_cq->cqn = (id << CQ_BANKID_SHIFT) | bankid; in alloc_cqn()
104 struct hns_roce_cq *hr_cq, in hns_roce_create_cqc() argument
117 hr_dev->hw->write_cqc(hr_dev, hr_cq, mailbox->buf, mtts, dma_handle); in hns_roce_create_cqc()
120 hr_cq->cqn); in hns_roce_create_cqc()
124 hr_cq->cqn, ret); in hns_roce_create_cqc()
131 static int alloc_cqc(struct hns_roce_dev *hr_dev, struct hns_roce_cq *hr_cq) in alloc_cqc() argument
138 ret = hns_roce_mtr_find(hr_dev, &hr_cq->mtr, 0, mtts, ARRAY_SIZE(mtts)); in alloc_cqc()
145 ret = hns_roce_table_get(hr_dev, &cq_table->table, hr_cq->cqn); in alloc_cqc()
148 hr_cq->cqn, ret); in alloc_cqc()
[all …]
hns_roce_restrack.c:14  struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); in hns_roce_fill_res_cq_entry() local
21 if (rdma_nl_put_driver_u32(msg, "cq_depth", hr_cq->cq_depth)) in hns_roce_fill_res_cq_entry()
24 if (rdma_nl_put_driver_u32(msg, "cons_index", hr_cq->cons_index)) in hns_roce_fill_res_cq_entry()
27 if (rdma_nl_put_driver_u32(msg, "cqe_size", hr_cq->cqe_size)) in hns_roce_fill_res_cq_entry()
30 if (rdma_nl_put_driver_u32(msg, "arm_sn", hr_cq->arm_sn)) in hns_roce_fill_res_cq_entry()
46 struct hns_roce_cq *hr_cq = to_hr_cq(ib_cq); in hns_roce_fill_res_cq_entry_raw() local
53 ret = hr_dev->hw->query_cqc(hr_dev, hr_cq->cqn, &context); in hns_roce_fill_res_cq_entry_raw()
hns_roce_hw_v2.c:2613  struct hns_roce_cq *hr_cq; in free_mr_init_cq() local
2618 hr_cq = kzalloc(sizeof(*hr_cq), GFP_KERNEL); in free_mr_init_cq()
2619 if (ZERO_OR_NULL_PTR(hr_cq)) in free_mr_init_cq()
2622 cq = &hr_cq->ib_cq; in free_mr_init_cq()
2627 kfree(hr_cq); in free_mr_init_cq()
3502 static void *get_cqe_v2(struct hns_roce_cq *hr_cq, int n) in get_cqe_v2() argument
3504 return hns_roce_buf_offset(hr_cq->mtr.kmem, n * hr_cq->cqe_size); in get_cqe_v2()
3507 static void *get_sw_cqe_v2(struct hns_roce_cq *hr_cq, unsigned int n) in get_sw_cqe_v2() argument
3509 struct hns_roce_v2_cqe *cqe = get_cqe_v2(hr_cq, n & hr_cq->ib_cq.cqe); in get_sw_cqe_v2()
3512 return (hr_reg_read(cqe, CQE_OWNER) ^ !!(n & hr_cq->cq_depth)) ? cqe : in get_sw_cqe_v2()
[all …]
hns_roce_main.c:1009  struct hns_roce_cq *hr_cq = to_hr_cq(cq); in check_and_get_armed_cq() local
1012 spin_lock_irqsave(&hr_cq->lock, flags); in check_and_get_armed_cq()
1014 if (!hr_cq->is_armed) { in check_and_get_armed_cq()
1015 hr_cq->is_armed = 1; in check_and_get_armed_cq()
1016 list_add_tail(&hr_cq->node, cq_list); in check_and_get_armed_cq()
1019 spin_unlock_irqrestore(&hr_cq->lock, flags); in check_and_get_armed_cq()
1025 struct hns_roce_cq *hr_cq; in hns_roce_handle_device_err() local
1045 list_for_each_entry(hr_cq, &cq_list, node) in hns_roce_handle_device_err()
1046 hns_roce_cq_completion(hr_dev, hr_cq->cqn); in hns_roce_handle_device_err()
hns_roce_qp.c:1528  struct hns_roce_cq *hr_cq; in hns_roce_wq_overflow() local
1535 hr_cq = to_hr_cq(ib_cq); in hns_roce_wq_overflow()
1536 spin_lock(&hr_cq->lock); in hns_roce_wq_overflow()
1538 spin_unlock(&hr_cq->lock); in hns_roce_wq_overflow()
hns_roce_device.h:941  struct hns_roce_cq *hr_cq, void *mb_buf, u64 *mtts,