/linux-6.12.1/drivers/net/ethernet/cisco/enic/ |
D | wq_enet_desc.h |
    42   u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,           in wq_enet_desc_enc() argument
    53   (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |               in wq_enet_desc_enc()
    61   u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,       in wq_enet_desc_dec() argument
    76   *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>    in wq_enet_desc_dec()
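The enc/dec pair above packs the single-bit cq_entry request into the descriptor's 16-bit header_length_flags word; the same header is duplicated in the fnic and snic drivers below. A minimal sketch of that round trip, assuming a shift position and ignoring the other flag fields (the real constants live in wq_enet_desc.h):

    #include <stdint.h>

    /* assumed bit position; the driver defines WQ_ENET_FLAGS_CQ_ENTRY_SHIFT */
    #define CQ_ENTRY_SHIFT 13

    /* encode: OR the masked single-bit flag into the 16-bit header word */
    static inline uint16_t enc_cq_entry(uint16_t hdr_flags, uint8_t cq_entry)
    {
        return hdr_flags | (uint16_t)((cq_entry & 1) << CQ_ENTRY_SHIFT);
    }

    /* decode: shift the word back down and mask off everything else */
    static inline uint8_t dec_cq_entry(uint16_t hdr_flags)
    {
        return (uint8_t)((hdr_flags >> CQ_ENTRY_SHIFT) & 1);
    }

In the kernel the word is __le16, so the driver wraps these accesses in cpu_to_le16()/le16_to_cpu(), as the line-76 match shows.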
|
D | vnic_wq.h |
    49   uint8_t cq_entry; /* Gets completion event from hw */          member
    121  uint8_t desc_skip_cnt, uint8_t cq_entry,                       in vnic_wq_post() argument
    127  buf->cq_entry = cq_entry;                                      in vnic_wq_post()
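vnic_wq_post() copies the caller's cq_entry flag into per-buffer bookkeeping so the completion path later knows which posted buffers asked the hardware for a CQ event. A hedged sketch of that pattern, with every name except cq_entry invented for illustration:

    #include <stdint.h>

    struct wq_buf {
        void    *os_buf;   /* driver-private handle for the posted buffer */
        uint8_t  cq_entry; /* 1 = hw should write a completion for this desc */
    };

    /* record one posted descriptor; typically cq_entry is set only on
     * the last fragment of a packet so completions are coalesced */
    static void wq_post(struct wq_buf *ring, unsigned int *tail,
                        unsigned int ring_size, void *buf, uint8_t cq_entry)
    {
        ring[*tail].os_buf = buf;
        ring[*tail].cq_entry = cq_entry;
        *tail = (*tail + 1) % ring_size;
    }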
|
D | enic_res.h |
    34   int offload_mode, int cq_entry, int sop, int eop, int loopback)   in enic_queue_wq_desc_ex() argument
    46   (u8)eop, (u8)cq_entry,                                            in enic_queue_wq_desc_ex()
    53   (u8)cq_entry, compressed_send, wrid);                             in enic_queue_wq_desc_ex()
|
/linux-6.12.1/drivers/scsi/fnic/ |
D | wq_enet_desc.h |
    41   u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,           in wq_enet_desc_enc() argument
    52   (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |               in wq_enet_desc_enc()
    60   u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,       in wq_enet_desc_dec() argument
    75   *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>    in wq_enet_desc_dec()
|
D | fnic_res.h |
    23   int cq_entry, int sop, int eop)     in fnic_queue_wq_desc() argument
    33   (u8)eop, (u8)cq_entry,              in fnic_queue_wq_desc()
    47   int cq_entry)                       in fnic_queue_wq_eth_desc() argument
    58   (u8)cq_entry,                       in fnic_queue_wq_eth_desc()
|
/linux-6.12.1/drivers/scsi/snic/ |
D | wq_enet_desc.h |
    39   u8 offload_mode, u8 eop, u8 cq_entry, u8 fcoe_encap,           in wq_enet_desc_enc() argument
    50   (cq_entry & 1) << WQ_ENET_FLAGS_CQ_ENTRY_SHIFT |               in wq_enet_desc_enc()
    58   u8 *offload_mode, u8 *eop, u8 *cq_entry, u8 *fcoe_encap,       in wq_enet_desc_dec() argument
    73   *cq_entry = (u8)((le16_to_cpu(desc->header_length_flags) >>    in wq_enet_desc_dec()
|
D | snic_res.h |
    55   int cq_entry)                       in snic_queue_wq_eth_desc() argument
    66   (u8)cq_entry,                       in snic_queue_wq_eth_desc()
|
/linux-6.12.1/drivers/accel/habanalabs/common/ |
D | irq.c |
    136  struct hl_cq_entry *cq_entry, *cq_base;                        in hl_irq_handler_cq() local
    149  cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];            in hl_irq_handler_cq()
    152  le32_to_cpu(cq_entry->data));                                  in hl_irq_handler_cq()
    163  le32_to_cpu(cq_entry->data));                                  in hl_irq_handler_cq()
    166  le32_to_cpu(cq_entry->data));                                  in hl_irq_handler_cq()
    185  cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &     in hl_irq_handler_cq()
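The line-185 match is the classic endian-safe read-modify-write on a completion entry that lives in little-endian DMA memory: convert to CPU order, clear a bit, convert back. A compact sketch of the same pattern, with the mask name and bit position assumed for illustration (habanalabs defines its own masks):

    #include <linux/types.h>
    #include <asm/byteorder.h>

    #define CQE_VALID_MASK 0x80000000u  /* assumed bit position */

    static void cqe_clear_valid(__le32 *data)
    {
        u32 v = le32_to_cpu(*data);               /* LE wire format -> CPU order */
        *data = cpu_to_le32(v & ~CQE_VALID_MASK); /* clear the bit, write back LE */
    }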
|
/linux-6.12.1/drivers/ufs/core/ |
D | ufshcd-priv.h |
    65   struct cq_entry *cqe);
    375  q->cq_tail_slot = val / sizeof(struct cq_entry);                    in ufshcd_mcq_update_cq_tail_slot()
    392  writel(q->cq_head_slot * sizeof(struct cq_entry), q->mcq_cq_head);  in ufshcd_mcq_update_cq_head()
    395  static inline struct cq_entry *ufshcd_mcq_cur_cqe(struct ufs_hw_queue *q)   in ufshcd_mcq_cur_cqe()
    397  struct cq_entry *cqe = q->cqe_base_addr;                            in ufshcd_mcq_cur_cqe()
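The MCQ hardware exposes its completion-queue head and tail pointers as byte offsets into the CQE ring, while the driver works with entry indices, so the helpers above divide or multiply by sizeof(struct cq_entry). A simplified sketch of that conversion, with a bare-bones struct standing in for the real ufs_hw_queue context:

    #include <stdint.h>
    #include <stddef.h>

    struct cq_entry { uint8_t raw[32]; };  /* UFSHCI CQEs are 32 bytes, see ufshci.h */

    /* hw register value (byte offset into the ring) -> slot index */
    static inline uint32_t cqe_offset_to_slot(uint32_t byte_off)
    {
        return byte_off / sizeof(struct cq_entry);
    }

    /* slot index -> byte offset to write back to the hw head register */
    static inline uint32_t cqe_slot_to_offset(uint32_t slot)
    {
        return slot * sizeof(struct cq_entry);
    }

    /* current CQE = ring base + head slot */
    static inline struct cq_entry *cur_cqe(struct cq_entry *base, uint32_t head)
    {
        return &base[head];
    }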
|
D | ufs-mcq.c |
    251  cqe_size = sizeof(struct cq_entry) * hwq->max_entries;                    in ufshcd_mcq_memory_alloc()
    288  static int ufshcd_mcq_get_tag(struct ufs_hba *hba, struct cq_entry *cqe)  in ufshcd_mcq_get_tag()
    305  struct cq_entry *cqe = ufshcd_mcq_cur_cqe(hwq);                           in ufshcd_mcq_process_cqe()
|
D | ufshcd.c |
    825   struct cq_entry *cqe)   in ufshcd_get_tr_ocs()
    5348  struct cq_entry *cqe)   in ufshcd_transfer_rsp_status()
    5529  struct cq_entry *cqe)   in ufshcd_compl_one_cqe()
|
/linux-6.12.1/include/ufs/ |
D | ufshci.h |
    562  struct cq_entry {                              struct
    581  static_assert(sizeof(struct cq_entry) == 32);
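Pinning the CQ entry to exactly 32 bytes at compile time matters because ufshcd-priv.h converts hardware byte offsets with sizeof(struct cq_entry); if padding ever changed the size, the slot arithmetic would silently break. An illustrative use of the same guard, with placeholder fields rather than the real UFSHCI layout:

    #include <linux/types.h>
    #include <linux/build_bug.h>

    struct example_cqe {
        __le64 cmd_desc_base_addr;  /*  8 bytes */
        __le32 status;              /*  4 bytes */
        __le32 reserved[5];         /* 20 bytes of reserved space */
    };

    /* compile-time guard: the hw contract is a fixed 32-byte entry */
    static_assert(sizeof(struct example_cqe) == 32);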
|
D | ufshcd.h |
    1161  struct cq_entry *cqe_base_addr;
|
/linux-6.12.1/drivers/infiniband/sw/siw/ |
D | siw_verbs.c |
    1112  rdma_user_mmap_entry_remove(cq->cq_entry);   in siw_destroy_cq()
    1181  cq->cq_entry =                               in siw_create_cq()
    1184  if (!cq->cq_entry) {                         in siw_create_cq()
    1210  rdma_user_mmap_entry_remove(cq->cq_entry);   in siw_create_cq()
|
D | siw.h |
    208  struct rdma_user_mmap_entry *cq_entry; /* mmap info for CQE array */   member
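In siw, cq_entry is not a completion entry at all but the rdma_user_mmap_entry that lets userspace mmap the CQE array; the siw_verbs.c matches above show it being removed both on the siw_create_cq() error exit and in siw_destroy_cq(). A rough sketch of that lifecycle, with the insert helper's arguments assumed (siw's actual siw_mmap_entry_insert() signature may differ):

    /* create path: publish the CQE array for userspace mmap;
     * names other than cq_entry and rdma_user_mmap_entry_remove()
     * follow the listing but are simplified here */
    static int cq_publish_to_user(struct siw_ucontext *uctx, struct siw_cq *cq,
                                  size_t cq_size, u64 *key)
    {
        cq->cq_entry = siw_mmap_entry_insert(uctx, cq->queue, cq_size, key);
        return cq->cq_entry ? 0 : -ENOMEM;
    }

    /* runs both on the create error path and on destroy: the mmap
     * entry must be retracted before the CQE array is freed */
    static void cq_unpublish(struct siw_cq *cq)
    {
        rdma_user_mmap_entry_remove(cq->cq_entry);
    }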
|