Lines matching refs: dbc
Cross-reference hits for the dbc (DMA bridge channel) identifier in the QAIC accelerator driver's data path.
873 bo->dbc = &qdev->dbc[hdr->dbc_id]; in qaic_prepare_bo()
898 bo->dbc = NULL; in qaic_unprepare_bo()
927 if (bo->total_slice_nents > bo->dbc->nelem) { in qaic_attach_slicing_bo()
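
The hits at 873/898/927 pair BO setup with teardown: qaic_prepare_bo() binds the BO to its DMA bridge channel, qaic_unprepare_bo() severs the link, and qaic_attach_slicing_bo() rejects slicing layouts whose scatter entries exceed the request FIFO depth. A minimal sketch of that capacity check, using only the fields visible in the hits (the error code is illustrative):

    /* A sliced BO occupies one FIFO element per scatter entry, so a
     * layout with more entries than the FIFO holds can never be submitted.
     */
    if (bo->total_slice_nents > bo->dbc->nelem)
        return -ENOSPC;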
940 struct dma_bridge_chan *dbc; in qaic_attach_slice_bo_ioctl() local
1015 dbc = &qdev->dbc[args->hdr.dbc_id]; in qaic_attach_slice_bo_ioctl()
1016 rcu_id = srcu_read_lock(&dbc->ch_lock); in qaic_attach_slice_bo_ioctl()
1017 if (dbc->usr != usr) { in qaic_attach_slice_bo_ioctl()
1034 list_add_tail(&bo->bo_list, &bo->dbc->bo_lists); in qaic_attach_slice_bo_ioctl()
1035 srcu_read_unlock(&dbc->ch_lock, rcu_id); in qaic_attach_slice_bo_ioctl()
1046 srcu_read_unlock(&dbc->ch_lock, rcu_id); in qaic_attach_slice_bo_ioctl()
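
qaic_attach_slice_bo_ioctl() shows the SRCU read-side guard used on every path that touches a channel: take ch_lock, verify the channel still belongs to the calling user, do the work, and unlock on every exit path. A condensed sketch of the pattern (the error value is illustrative):

    int rcu_id;

    dbc = &qdev->dbc[args->hdr.dbc_id];
    rcu_id = srcu_read_lock(&dbc->ch_lock);
    if (dbc->usr != usr) {
        ret = -EPERM;   /* channel revoked or owned by another client */
        goto unlock;
    }
    /* ... build the slices, then publish the BO on the channel ... */
    list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
    unlock:
        srcu_read_unlock(&dbc->ch_lock, rcu_id);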
1073 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; in copy_exec_reqs() local
1078 avail = fifo_space_avail(head, tail, dbc->nelem); in copy_exec_reqs()
1082 if (tail + slice->nents > dbc->nelem) { in copy_exec_reqs()
1083 avail = dbc->nelem - tail; in copy_exec_reqs()
1085 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail); in copy_exec_reqs()
1089 memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail); in copy_exec_reqs()
1091 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents); in copy_exec_reqs()
1094 *ptail = (tail + slice->nents) % dbc->nelem; in copy_exec_reqs()
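
copy_exec_reqs() treats req_q_base as a circular queue of nelem fixed-size elements: it first checks that enough space exists between head and tail, then copies the request run in at most two pieces when it wraps past the end of the ring. A sketch of the wrap logic, assuming fifo_at() indexes element-wise into the queue (the error code is illustrative):

    avail = fifo_space_avail(head, tail, dbc->nelem);
    if (avail < slice->nents)
        return -EAGAIN; /* not enough room; the caller may retry */

    if (tail + slice->nents > dbc->nelem) {
        /* wraps: copy up to the end of the ring, then the rest at slot 0 */
        avail = dbc->nelem - tail;
        memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
        memcpy(dbc->req_q_base, reqs + avail,
               sizeof(*reqs) * (slice->nents - avail));
    } else {
        memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
    }
    *ptail = (tail + slice->nents) % dbc->nelem;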
1100 u64 resize, struct dma_bridge_chan *dbc, u32 head, in copy_partial_exec_reqs() argument
1110 avail = fifo_space_avail(head, tail, dbc->nelem); in copy_partial_exec_reqs()
1129 if (tail + first_n > dbc->nelem) { in copy_partial_exec_reqs()
1130 avail = dbc->nelem - tail; in copy_partial_exec_reqs()
1132 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail); in copy_partial_exec_reqs()
1136 memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail); in copy_partial_exec_reqs()
1138 memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n); in copy_partial_exec_reqs()
1146 last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem); in copy_partial_exec_reqs()
1161 *ptail = (tail + first_n + 1) % dbc->nelem; in copy_partial_exec_reqs()
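
copy_partial_exec_reqs() performs the same wrap-aware copy for the first_n requests that are unchanged, then appends one request rewritten for the resized final transfer, which is why the tail advances by first_n + 1 rather than first_n:

    /* first_n whole requests were copied verbatim; the next slot gets the
     * request whose length was trimmed to the partial-execute resize.
     */
    last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
    /* ... fill *last_req from the original request plus the resize ... */
    *ptail = (tail + first_n + 1) % dbc->nelem;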
1168 bool is_partial, struct dma_bridge_chan *dbc, u32 head, in send_bo_list_to_device() argument
1206 spin_lock_irqsave(&dbc->xfer_lock, flags); in send_bo_list_to_device()
1208 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in send_bo_list_to_device()
1213 bo->req_id = dbc->next_req_id++; in send_bo_list_to_device()
1221 ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail); in send_bo_list_to_device()
1225 pexec[i].resize - slice->offset, dbc, in send_bo_list_to_device()
1228 ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail); in send_bo_list_to_device()
1230 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in send_bo_list_to_device()
1235 list_add_tail(&bo->xfer_list, &dbc->xfer_list); in send_bo_list_to_device()
1236 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in send_bo_list_to_device()
1249 spin_lock_irqsave(&dbc->xfer_lock, flags); in send_bo_list_to_device()
1250 bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list); in send_bo_list_to_device()
1253 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in send_bo_list_to_device()
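
send_bo_list_to_device() serializes submission with xfer_lock: each BO draws a request id from the channel's counter, its slices are copied into the FIFO, and only then is it queued on xfer_list so the IRQ thread can match completions. On failure, the hits at 1249-1253 show it walking back from list_last_entry() under the same lock. A sketch of the per-BO critical section (the unwind label is hypothetical):

    spin_lock_irqsave(&dbc->xfer_lock, flags);
    bo->req_id = dbc->next_req_id++;    /* echoed back by the device */
    ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
    if (ret) {
        spin_unlock_irqrestore(&dbc->xfer_lock, flags);
        goto unwind;    /* roll back BOs already placed on xfer_list */
    }
    list_add_tail(&bo->xfer_list, &dbc->xfer_list);
    spin_unlock_irqrestore(&dbc->xfer_lock, flags);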
1295 struct dma_bridge_chan *dbc; in __qaic_execute_bo_ioctl() local
1347 dbc = &qdev->dbc[args->hdr.dbc_id]; in __qaic_execute_bo_ioctl()
1349 rcu_id = srcu_read_lock(&dbc->ch_lock); in __qaic_execute_bo_ioctl()
1350 if (!dbc->usr || dbc->usr->handle != usr->handle) { in __qaic_execute_bo_ioctl()
1355 head = readl(dbc->dbc_base + REQHP_OFF); in __qaic_execute_bo_ioctl()
1356 tail = readl(dbc->dbc_base + REQTP_OFF); in __qaic_execute_bo_ioctl()
1364 queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail); in __qaic_execute_bo_ioctl()
1366 ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc, in __qaic_execute_bo_ioctl()
1373 writel(tail, dbc->dbc_base + REQTP_OFF); in __qaic_execute_bo_ioctl()
1379 schedule_work(&dbc->poll_work); in __qaic_execute_bo_ioctl()
1382 srcu_read_unlock(&dbc->ch_lock, rcu_id); in __qaic_execute_bo_ioctl()
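
__qaic_execute_bo_ioctl() samples the hardware head and tail pointers once, computes the current queue depth with wrap handling, submits, and rings the doorbell by writing the new tail to REQTP_OFF. The depth computation from the hit at 1364, with a worked example:

    head = readl(dbc->dbc_base + REQHP_OFF);
    tail = readl(dbc->dbc_base + REQTP_OFF);

    /* in-flight elements; e.g. nelem = 256, head = 250, tail = 10
     * gives 256 - (250 - 10) = 16 outstanding entries.
     */
    queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);

    /* ... copy the requests, then publish the new tail to the device ... */
    writel(tail, dbc->dbc_base + REQTP_OFF);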
1437 struct dma_bridge_chan *dbc = data; in dbc_irq_handler() local
1442 rcu_id = srcu_read_lock(&dbc->ch_lock); in dbc_irq_handler()
1445 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_handler()
1454 if (!dbc->usr) { in dbc_irq_handler()
1455 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_handler()
1459 head = readl(dbc->dbc_base + RSPHP_OFF); in dbc_irq_handler()
1461 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_handler()
1465 tail = readl(dbc->dbc_base + RSPTP_OFF); in dbc_irq_handler()
1467 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_handler()
1472 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_handler()
1476 if (!dbc->qdev->single_msi) in dbc_irq_handler()
1478 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_handler()
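
dbc_irq_handler() is the hard-IRQ half. Under the SRCU lock it bails out if the channel has no user or the response queue is empty (head == tail), and only wakes the threaded handler when there is something to drain. A condensed sketch; the real handler also screens the readl() values and handles the single-MSI case (hit at 1476):

    static irqreturn_t dbc_irq_handler(int irq, void *data)
    {
        struct dma_bridge_chan *dbc = data;
        u32 head, tail;
        int rcu_id;

        rcu_id = srcu_read_lock(&dbc->ch_lock);
        if (!dbc->usr)
            goto done;  /* channel already released */
        head = readl(dbc->dbc_base + RSPHP_OFF);
        tail = readl(dbc->dbc_base + RSPTP_OFF);
        if (head == tail)
            goto done;  /* nothing new in the response queue */
        srcu_read_unlock(&dbc->ch_lock, rcu_id);
        return IRQ_WAKE_THREAD; /* defer the drain to dbc_irq_threaded_fn() */
    done:
        srcu_read_unlock(&dbc->ch_lock, rcu_id);
        return IRQ_HANDLED;
    }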
1484 struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work); in irq_polling_work() local
1490 rcu_id = srcu_read_lock(&dbc->ch_lock); in irq_polling_work()
1493 if (dbc->qdev->dev_state != QAIC_ONLINE) { in irq_polling_work()
1494 srcu_read_unlock(&dbc->ch_lock, rcu_id); in irq_polling_work()
1497 if (!dbc->usr) { in irq_polling_work()
1498 srcu_read_unlock(&dbc->ch_lock, rcu_id); in irq_polling_work()
1501 spin_lock_irqsave(&dbc->xfer_lock, flags); in irq_polling_work()
1502 if (list_empty(&dbc->xfer_list)) { in irq_polling_work()
1503 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in irq_polling_work()
1504 srcu_read_unlock(&dbc->ch_lock, rcu_id); in irq_polling_work()
1507 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in irq_polling_work()
1509 head = readl(dbc->dbc_base + RSPHP_OFF); in irq_polling_work()
1511 srcu_read_unlock(&dbc->ch_lock, rcu_id); in irq_polling_work()
1515 tail = readl(dbc->dbc_base + RSPTP_OFF); in irq_polling_work()
1517 srcu_read_unlock(&dbc->ch_lock, rcu_id); in irq_polling_work()
1522 irq_wake_thread(dbc->irq, dbc); in irq_polling_work()
1523 srcu_read_unlock(&dbc->ch_lock, rcu_id); in irq_polling_work()
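
irq_polling_work() is the fallback when the datapath runs in polling mode: it keeps checking while the device is online, the channel is owned, and xfer_list is non-empty, then hands off to the threaded handler via irq_wake_thread() as soon as RSPHP and RSPTP differ. A condensed sketch of the loop body (the sleep between iterations is not visible in the hits):

    while (true) {
        if (dbc->qdev->dev_state != QAIC_ONLINE || !dbc->usr)
            break;  /* device gone or channel revoked */
        spin_lock_irqsave(&dbc->xfer_lock, flags);
        if (list_empty(&dbc->xfer_list)) {
            spin_unlock_irqrestore(&dbc->xfer_lock, flags);
            break;  /* nothing outstanding to poll for */
        }
        spin_unlock_irqrestore(&dbc->xfer_lock, flags);
        head = readl(dbc->dbc_base + RSPHP_OFF);
        tail = readl(dbc->dbc_base + RSPTP_OFF);
        if (head != tail) {
            irq_wake_thread(dbc->irq, dbc); /* responses pending */
            break;
        }
        /* sleep for the polling interval before re-checking */
    }
    srcu_read_unlock(&dbc->ch_lock, rcu_id);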
1534 struct dma_bridge_chan *dbc = data; in dbc_irq_threaded_fn() local
1547 rcu_id = srcu_read_lock(&dbc->ch_lock); in dbc_irq_threaded_fn()
1548 qdev = dbc->qdev; in dbc_irq_threaded_fn()
1550 head = readl(dbc->dbc_base + RSPHP_OFF); in dbc_irq_threaded_fn()
1565 if (!dbc->usr) in dbc_irq_threaded_fn()
1568 tail = readl(dbc->dbc_base + RSPTP_OFF); in dbc_irq_threaded_fn()
1586 rsp = dbc->rsp_q_base + head * sizeof(*rsp); in dbc_irq_threaded_fn()
1591 spin_lock_irqsave(&dbc->xfer_lock, flags); in dbc_irq_threaded_fn()
1598 list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) { in dbc_irq_threaded_fn()
1619 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in dbc_irq_threaded_fn()
1620 head = (head + 1) % dbc->nelem; in dbc_irq_threaded_fn()
1627 writel(head, dbc->dbc_base + RSPHP_OFF); in dbc_irq_threaded_fn()
1636 schedule_work(&dbc->poll_work); in dbc_irq_threaded_fn()
1638 tail = readl(dbc->dbc_base + RSPTP_OFF); in dbc_irq_threaded_fn()
1644 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_threaded_fn()
1648 srcu_read_unlock(&dbc->ch_lock, rcu_id); in dbc_irq_threaded_fn()
1652 schedule_work(&dbc->poll_work); in dbc_irq_threaded_fn()
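
dbc_irq_threaded_fn() drains the response queue: the response at index head carries the request id assigned at submit time, matching BOs are removed from xfer_list under xfer_lock and completed, head advances modulo nelem, and the new head is written back to RSPHP_OFF so the device can reuse the slots. A sketch of the drain loop (the response field and completion names are illustrative):

    while (head != tail) {
        rsp = dbc->rsp_q_base + head * sizeof(*rsp);

        spin_lock_irqsave(&dbc->xfer_lock, flags);
        list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
            if (bo->req_id == le16_to_cpu(rsp->req_id)) {
                list_del(&bo->xfer_list);
                complete_all(&bo->xfer_done);
                break;
            }
        }
        spin_unlock_irqrestore(&dbc->xfer_lock, flags);
        head = (head + 1) % dbc->nelem;
    }
    /* hand the consumed slots back to the device */
    writel(head, dbc->dbc_base + RSPHP_OFF);

After the queue looks empty, the function re-reads RSPTP_OFF (hit at 1638) to catch responses that arrived during the drain, and in polling mode re-arms poll_work rather than relying on a further interrupt.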
1661 struct dma_bridge_chan *dbc; in qaic_wait_bo_ioctl() local
1692 dbc = &qdev->dbc[args->dbc_id]; in qaic_wait_bo_ioctl()
1694 rcu_id = srcu_read_lock(&dbc->ch_lock); in qaic_wait_bo_ioctl()
1695 if (dbc->usr != usr) { in qaic_wait_bo_ioctl()
1717 if (!dbc->usr) in qaic_wait_bo_ioctl()
1723 srcu_read_unlock(&dbc->ch_lock, rcu_id); in qaic_wait_bo_ioctl()
1822 struct dma_bridge_chan *dbc; in qaic_detach_slice_bo_ioctl() local
1863 dbc = bo->dbc; in qaic_detach_slice_bo_ioctl()
1864 rcu_id = srcu_read_lock(&dbc->ch_lock); in qaic_detach_slice_bo_ioctl()
1865 if (dbc->usr != usr) { in qaic_detach_slice_bo_ioctl()
1871 spin_lock_irqsave(&dbc->xfer_lock, flags); in qaic_detach_slice_bo_ioctl()
1873 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in qaic_detach_slice_bo_ioctl()
1877 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in qaic_detach_slice_bo_ioctl()
1882 srcu_read_unlock(&dbc->ch_lock, rcu_id); in qaic_detach_slice_bo_ioctl()
1894 static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc) in empty_xfer_list() argument
1899 spin_lock_irqsave(&dbc->xfer_lock, flags); in empty_xfer_list()
1900 while (!list_empty(&dbc->xfer_list)) { in empty_xfer_list()
1901 bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list); in empty_xfer_list()
1903 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in empty_xfer_list()
1913 spin_lock_irqsave(&dbc->xfer_lock, flags); in empty_xfer_list()
1915 spin_unlock_irqrestore(&dbc->xfer_lock, flags); in empty_xfer_list()
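
empty_xfer_list() uses the drop-and-reacquire pattern: xfer_lock cannot stay held across the per-BO teardown, so each pass pops one entry, releases the lock, finishes the BO, and retakes the lock before testing the list again:

    spin_lock_irqsave(&dbc->xfer_lock, flags);
    while (!list_empty(&dbc->xfer_list)) {
        bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
        list_del_init(&bo->xfer_list);  /* removal is implied by the hits */
        spin_unlock_irqrestore(&dbc->xfer_lock, flags);

        /* complete and unmap the BO without the lock held */

        spin_lock_irqsave(&dbc->xfer_lock, flags);
    }
    spin_unlock_irqrestore(&dbc->xfer_lock, flags);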
1920 if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle) in disable_dbc()
1923 qdev->dbc[dbc_id].usr = NULL; in disable_dbc()
1924 synchronize_srcu(&qdev->dbc[dbc_id].ch_lock); in disable_dbc()
1938 qdev->dbc[dbc_id].usr = usr; in enable_dbc()
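
disable_dbc() revokes ownership by clearing usr and then calling synchronize_srcu(), which returns only after every reader that entered srcu_read_lock(&ch_lock) before the clear has exited; from then on no ioctl or IRQ path can act on the old user's behalf. enable_dbc() is the simple inverse assignment. Sketch (the error code is illustrative):

    if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle)
        return -EPERM;  /* caller does not own this channel */

    qdev->dbc[dbc_id].usr = NULL;
    /* wait out all SRCU readers that may have sampled the old usr */
    synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
    return 0;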
1943 struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; in wakeup_dbc() local
1945 dbc->usr = NULL; in wakeup_dbc()
1946 empty_xfer_list(qdev, dbc); in wakeup_dbc()
1947 synchronize_srcu(&dbc->ch_lock); in wakeup_dbc()
1952 empty_xfer_list(qdev, dbc); in wakeup_dbc()
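
wakeup_dbc() drains xfer_list on both sides of the grace period: a reader that passed the usr check just before it was cleared can still queue a BO, so the second empty_xfer_list() after synchronize_srcu() closes that window:

    dbc->usr = NULL;
    empty_xfer_list(qdev, dbc);
    synchronize_srcu(&dbc->ch_lock);
    /* readers that raced past the usr check may have queued more BOs;
     * drain again now that no reader can remain
     */
    empty_xfer_list(qdev, dbc);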
1958 struct dma_bridge_chan *dbc; in release_dbc() local
1960 dbc = &qdev->dbc[dbc_id]; in release_dbc()
1961 if (!dbc->in_use) in release_dbc()
1966 dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr); in release_dbc()
1967 dbc->total_size = 0; in release_dbc()
1968 dbc->req_q_base = NULL; in release_dbc()
1969 dbc->dma_addr = 0; in release_dbc()
1970 dbc->nelem = 0; in release_dbc()
1971 dbc->usr = NULL; in release_dbc()
1973 list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) { in release_dbc()
1981 dbc->in_use = false; in release_dbc()
1982 wake_up(&dbc->dbc_release); in release_dbc()
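
release_dbc() frees the coherent allocation backing the channel's queues, zeroes the bookkeeping fields, detaches any BOs still on bo_lists, and finally clears in_use and wakes waiters on dbc_release. Annotated from the hits:

    dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base,
                      dbc->dma_addr);
    dbc->total_size = 0;
    dbc->req_q_base = NULL;
    dbc->dma_addr = 0;
    dbc->nelem = 0;
    dbc->usr = NULL;

    /* ... detach every BO left on dbc->bo_lists ... */

    dbc->in_use = false;
    wake_up(&dbc->dbc_release);     /* unblock waiters for this channel */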
1985 void qaic_data_get_fifo_info(struct dma_bridge_chan *dbc, u32 *head, u32 *tail) in qaic_data_get_fifo_info() argument
1987 if (!dbc || !head || !tail) in qaic_data_get_fifo_info()
1990 *head = readl(dbc->dbc_base + REQHP_OFF); in qaic_data_get_fifo_info()
1991 *tail = readl(dbc->dbc_base + REQTP_OFF); in qaic_data_get_fifo_info()
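
qaic_data_get_fifo_info() is a small helper: after NULL-checking its arguments it snapshots the request queue's head and tail registers for a caller outside the submit path. A hypothetical consumer:

    u32 head, tail;

    qaic_data_get_fifo_info(dbc, &head, &tail);
    /* hypothetical use: report occupancy the same way the submit path does */
    pr_debug("dbc fifo head=%u tail=%u\n", head, tail);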