
Searched refs:prq (Results 1 – 6 of 6) sorted by relevance

/linux-6.12.1/drivers/net/ethernet/qlogic/netxen/
netxen_nic_ctx.c
256 nx_hostrq_rx_ctx_t *prq; in nx_fw_cmd_create_rx_ctx() local
289 prq = addr; in nx_fw_cmd_create_rx_ctx()
299 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); in nx_fw_cmd_create_rx_ctx()
307 prq->capabilities[0] = cpu_to_le32(cap); in nx_fw_cmd_create_rx_ctx()
308 prq->host_int_crb_mode = in nx_fw_cmd_create_rx_ctx()
310 prq->host_rds_crb_mode = in nx_fw_cmd_create_rx_ctx()
313 prq->num_rds_rings = cpu_to_le16(nrds_rings); in nx_fw_cmd_create_rx_ctx()
314 prq->num_sds_rings = cpu_to_le16(nsds_rings); in nx_fw_cmd_create_rx_ctx()
315 prq->rds_ring_offset = cpu_to_le32(0); in nx_fw_cmd_create_rx_ctx()
317 val = le32_to_cpu(prq->rds_ring_offset) + in nx_fw_cmd_create_rx_ctx()
[all …]
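
The netxen excerpt above shows the usual build-a-request-block-for-firmware idiom: every multi-byte field is converted to little-endian before the adapter reads the block over DMA. Below is a minimal user-space sketch of that idiom, using glibc's <endian.h> helpers in place of the kernel's cpu_to_le16/32/64(); the struct layout and field names are illustrative stand-ins, not the driver's nx_hostrq_rx_ctx_t.

    /* Illustrative sketch (not the driver's real layout): a request block whose
     * fields must be little-endian before the device/firmware reads it. */
    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>   /* htole16/htole32/htole64, le32toh (glibc) */

    struct fake_hostrq_rx_ctx {          /* hypothetical stand-in */
            uint64_t host_rsp_dma_addr;  /* stored little-endian */
            uint32_t capabilities;
            uint16_t num_rds_rings;
            uint16_t num_sds_rings;
            uint32_t rds_ring_offset;
    };

    int main(void)
    {
            struct fake_hostrq_rx_ctx prq = {0};

            /* Convert each CPU-native value to little-endian, the role
             * cpu_to_le64()/cpu_to_le32()/cpu_to_le16() play in the excerpt. */
            prq.host_rsp_dma_addr = htole64(0x1000ULL);
            prq.capabilities      = htole32(0x3);
            prq.num_rds_rings     = htole16(2);
            prq.num_sds_rings     = htole16(4);
            prq.rds_ring_offset   = htole32(0);

            /* Reading a field back for CPU-side arithmetic mirrors
             * le32_to_cpu(prq->rds_ring_offset) + ... in the driver. */
            uint32_t val = le32toh(prq.rds_ring_offset) + (uint32_t)sizeof(prq);
            printf("rds ring descriptors would start at offset %u\n", (unsigned)val);
            return 0;
    }
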
/linux-6.12.1/drivers/net/ethernet/qlogic/qlcnic/
qlcnic_ctx.c
247 struct qlcnic_hostrq_rx_ctx *prq; in qlcnic_82xx_fw_cmd_create_rx_ctx() local
269 prq = addr; in qlcnic_82xx_fw_cmd_create_rx_ctx()
279 prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr); in qlcnic_82xx_fw_cmd_create_rx_ctx()
290 prq->valid_field_offset = cpu_to_le16(temp_u16); in qlcnic_82xx_fw_cmd_create_rx_ctx()
291 prq->txrx_sds_binding = nsds_rings - 1; in qlcnic_82xx_fw_cmd_create_rx_ctx()
293 prq->host_int_crb_mode = cpu_to_le32(temp_intr_crb_mode); in qlcnic_82xx_fw_cmd_create_rx_ctx()
295 prq->host_rds_crb_mode = cpu_to_le32(temp_rds_crb_mode); in qlcnic_82xx_fw_cmd_create_rx_ctx()
298 prq->capabilities[0] = cpu_to_le32(cap); in qlcnic_82xx_fw_cmd_create_rx_ctx()
300 prq->num_rds_rings = cpu_to_le16(nrds_rings); in qlcnic_82xx_fw_cmd_create_rx_ctx()
301 prq->num_sds_rings = cpu_to_le16(nsds_rings); in qlcnic_82xx_fw_cmd_create_rx_ctx()
[all …]
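
The qlcnic excerpt follows the same little-endian request-block pattern; the extra detail is that the driver also records, inside the block, the offset of a field the firmware will write back (valid_field_offset) and a ring-binding value derived from nsds_rings. A hedged sketch of that offset idiom, with made-up field names rather than qlcnic_hostrq_rx_ctx:

    /* Sketch of the "offset of a field inside the request block" idiom;
     * names are illustrative, not the driver's. */
    #include <stddef.h>   /* offsetof */
    #include <stdint.h>
    #include <stdio.h>
    #include <endian.h>

    struct fake_hostrq {                 /* hypothetical stand-in */
            uint64_t host_rsp_dma_addr;
            uint16_t valid_field_offset; /* little-endian on the wire */
            uint8_t  txrx_sds_binding;
            uint8_t  valid_field;        /* firmware writes here when done */
    };

    int main(void)
    {
            struct fake_hostrq prq = {0};
            unsigned int nsds_rings = 4;

            /* Tell the firmware where, inside the block, the valid byte lives. */
            prq.valid_field_offset =
                    htole16(offsetof(struct fake_hostrq, valid_field));
            /* Binding is "number of status rings minus one", as in the excerpt. */
            prq.txrx_sds_binding = nsds_rings - 1;

            printf("valid byte at offset %u, binding %u\n",
                   (unsigned)le16toh(prq.valid_field_offset),
                   (unsigned)prq.txrx_sds_binding);
            return 0;
    }
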
/linux-6.12.1/drivers/iommu/intel/
svm.c
35 iommu->prq = iommu_alloc_pages_node(iommu->node, GFP_KERNEL, PRQ_ORDER); in intel_svm_enable_prq()
36 if (!iommu->prq) { in intel_svm_enable_prq()
72 dmar_writeq(iommu->reg + DMAR_PQA_REG, virt_to_phys(iommu->prq) | PRQ_ORDER); in intel_svm_enable_prq()
85 iommu_free_pages(iommu->prq, PRQ_ORDER); in intel_svm_enable_prq()
86 iommu->prq = NULL; in intel_svm_enable_prq()
108 iommu_free_pages(iommu->prq, PRQ_ORDER); in intel_svm_finish_prq()
109 iommu->prq = NULL; in intel_svm_finish_prq()
331 req = &iommu->prq[head / sizeof(*req)]; in intel_drain_pasid_prq()
448 req = &iommu->prq[head / sizeof(*req)]; in prq_event_thread()
iommu.h
734 struct page_req_dsc *prq; member
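
In the Intel IOMMU excerpt, prq is the page request queue: a page-aligned buffer whose physical address is programmed into DMAR_PQA_REG, and whose hardware head pointer is a byte offset that the driver turns into a descriptor index with head / sizeof(*req). A small self-contained sketch of that byte-offset-to-index step, with a stand-in descriptor type rather than the real struct page_req_dsc:

    /* Minimal sketch of the page-request-queue indexing idiom in svm.c.
     * All names here are illustrative, not the Intel IOMMU structures. */
    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    struct fake_page_req_dsc {       /* stand-in for struct page_req_dsc */
            uint64_t qw[4];          /* the real descriptor is a few quadwords */
    };

    int main(void)
    {
            /* Pretend PRQ: an array of descriptors, like iommu->prq. */
            size_t nr = 128;
            struct fake_page_req_dsc *prq = calloc(nr, sizeof(*prq));
            if (!prq)
                    return 1;

            /* A head register value is a byte offset into the queue... */
            uint64_t head = 3 * sizeof(struct fake_page_req_dsc);

            /* ...so the current request is prq[head / sizeof(*req)],
             * mirroring req = &iommu->prq[head / sizeof(*req)]. */
            struct fake_page_req_dsc *req = &prq[head / sizeof(*req)];
            printf("head=%llu -> descriptor index %zu\n",
                   (unsigned long long)head, (size_t)(req - prq));

            free(prq);
            return 0;
    }
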
/linux-6.12.1/drivers/scsi/elx/efct/
efct_hw.c
1178 struct efc_hw_rq_buffer *prq; in efct_hw_rx_buffer_alloc() local
1189 for (i = 0, prq = rq_buf; i < count; i ++, prq++) { in efct_hw_rx_buffer_alloc()
1190 prq->rqindex = rqindex; in efct_hw_rx_buffer_alloc()
1191 prq->dma.size = size; in efct_hw_rx_buffer_alloc()
1192 prq->dma.virt = dma_alloc_coherent(&efct->pci->dev, in efct_hw_rx_buffer_alloc()
1193 prq->dma.size, in efct_hw_rx_buffer_alloc()
1194 &prq->dma.phys, in efct_hw_rx_buffer_alloc()
1196 if (!prq->dma.virt) { in efct_hw_rx_buffer_alloc()
1212 struct efc_hw_rq_buffer *prq; in efct_hw_rx_buffer_free() local
1215 for (i = 0, prq = rq_buf; i < count; i++, prq++) { in efct_hw_rx_buffer_free()
[all …]
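
The efct excerpt walks an array of receive-buffer descriptors, allocating a DMA buffer for each and bailing out if any allocation fails; the matching free routine walks the same array. The sketch below reproduces that allocate-then-unwind shape in plain C, with malloc/free standing in for dma_alloc_coherent()/dma_free_coherent() and an illustrative struct instead of efc_hw_rq_buffer:

    #include <stdlib.h>
    #include <string.h>

    struct fake_rq_buffer {          /* stand-in for efc_hw_rq_buffer */
            unsigned int rqindex;
            size_t size;
            void *virt;
    };

    static struct fake_rq_buffer *rx_buffer_alloc(unsigned int rqindex,
                                                  size_t count, size_t size)
    {
            struct fake_rq_buffer *rq_buf = calloc(count, sizeof(*rq_buf));
            struct fake_rq_buffer *prq;
            size_t i;

            if (!rq_buf)
                    return NULL;

            for (i = 0, prq = rq_buf; i < count; i++, prq++) {
                    prq->rqindex = rqindex;
                    prq->size = size;
                    prq->virt = malloc(size);  /* dma_alloc_coherent() in the driver */
                    if (!prq->virt) {
                            /* Unwind everything allocated so far, then fail. */
                            while (i--)
                                    free(rq_buf[i].virt);
                            free(rq_buf);
                            return NULL;
                    }
                    memset(prq->virt, 0, size);
            }
            return rq_buf;
    }

    static void rx_buffer_free(struct fake_rq_buffer *rq_buf, size_t count)
    {
            struct fake_rq_buffer *prq;
            size_t i;

            for (i = 0, prq = rq_buf; i < count; i++, prq++)
                    free(prq->virt);           /* dma_free_coherent() in the driver */
            free(rq_buf);
    }

    int main(void)
    {
            struct fake_rq_buffer *buf = rx_buffer_alloc(0, 16, 2048);
            if (buf)
                    rx_buffer_free(buf, 16);
            return 0;
    }

The unwind on failure matters because a partially built array would otherwise leak the buffers already obtained, which is exactly what the driver's error path avoids.
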
/linux-6.12.1/scripts/dtc/
checks.c
169 struct check *prq = c->prereq[i]; in run_check() local
170 error = error || run_check(prq, dti); in run_check()
171 if (prq->status != PASSED) { in run_check()
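
In dtc's checks.c, prq is a prerequisite check: run_check() recurses into each prerequisite first and only runs the current check if they all passed. A simplified, self-contained sketch of that recursion (the check and status types here are reduced stand-ins for dtc's real ones):

    #include <stdbool.h>
    #include <stdio.h>

    enum status { UNCHECKED, PASSED, FAILED };

    struct check {
            const char *name;
            enum status status;
            int num_prereqs;
            struct check **prereq;
            bool (*fn)(void);                  /* the actual check body */
    };

    static bool run_check(struct check *c)
    {
            bool error = false;
            int i;

            if (c->status != UNCHECKED)        /* already evaluated */
                    return c->status == FAILED;

            for (i = 0; i < c->num_prereqs; i++) {
                    struct check *prq = c->prereq[i];

                    error = run_check(prq) || error;  /* recurse into prereqs */
                    if (prq->status != PASSED) {      /* don't run on a broken base */
                            c->status = FAILED;
                            return true;
                    }
            }

            c->status = c->fn() ? PASSED : FAILED;
            return error || c->status == FAILED;
    }

    static bool always_ok(void) { return true; }

    int main(void)
    {
            struct check base = { "base", UNCHECKED, 0, NULL, always_ok };
            struct check *deps[] = { &base };
            struct check top = { "top", UNCHECKED, 1, deps, always_ok };

            printf("error=%d, top=%s\n", run_check(&top),
                   top.status == PASSED ? "PASSED" : "FAILED");
            return 0;
    }
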