// SPDX-License-Identifier: GPL-2.0+
/*
 * vsp1_dl.c -- R-Car VSP1 Display List
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/refcount.h>
#include <linux/slab.h>

#include "vsp1.h"
#include "vsp1_dl.h"
/**
 * struct vsp1_dl_ext_header - Extended display list header
 * @pre_ext_dl_num_cmd: number of pre-extended command bodies to parse
 * @flags: enables or disables execution of the pre and post commands
 * @pre_ext_dl_plist: start address of pre-extended display list bodies
 * @post_ext_dl_num_cmd: number of post-extended command bodies to parse
 * @post_ext_dl_plist: start address of post-extended display list bodies
 *
 * The fields are laid out expecting 32-bit accesses. The flags are
 * appropriate to the whole header, not to the individual commands.
 */
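/*
 * Layout sketch reconstructed from the field documentation above. The exact
 * field widths, the leading padding and the packing attribute are assumptions
 * based on the 32-bit access note, not datasheet fact.
 */
struct vsp1_dl_ext_header {
	u32 padding;

	/* Stored in a single 32-bit word together with the flags. */
	u16 pre_ext_dl_num_cmd;
	u16 flags;
	u32 pre_ext_dl_plist;

	u32 post_ext_dl_num_cmd;
	u32 post_ext_dl_plist;
} __packed;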
/**
 * struct vsp1_pre_ext_dl_body - Pre Extended Display List Body
 * @opcode: Extended display list command operation code
 * @flags: Pre-extended command flags. These are specific to each command.
 * @address_set: Source address set pointer. Must have 16-byte alignment.
 * @reserved: Zero bits for alignment.
 */
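/*
 * Structure sketch matching the documentation above; the u32 field widths
 * follow the hardware's 32-bit word size and are assumptions here.
 */
struct vsp1_pre_ext_dl_body {
	u32 opcode;
	u32 flags;
	u32 address_set;
	u32 reserved;
};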
/**
 * struct vsp1_dl_body - Display list body
 */

/**
 * struct vsp1_dl_body_pool - display list body pool
 */

/**
 * struct vsp1_dl_cmd_pool - Display List commands pool
 */

/**
 * struct vsp1_dl_list - Display list
 * @post_cmd: post command to be issued through extended dl header
 */

/**
 * struct vsp1_dl_manager - Display List manager
 * @singleshot: execute the display list in single-shot mode
 */

/* -----------------------------------------------------------------------------
 * Display List Body Management
 */
/**
 * vsp1_dl_body_pool_create - Create a pool of bodies from a single allocation
 */
struct vsp1_dl_body_pool *
vsp1_dl_body_pool_create(struct vsp1_device *vsp1, unsigned int num_bodies,
			 unsigned int num_entries, size_t extra_size)
{
	struct vsp1_dl_body_pool *pool;
	size_t dlb_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	/* Allocate room for all bodies in a single contiguous DMA area. */
	dlb_size = num_entries * sizeof(struct vsp1_dl_entry) + extra_size;
	pool->size = dlb_size * num_bodies;

	pool->bodies = kcalloc(num_bodies, sizeof(*pool->bodies), GFP_KERNEL);
	if (!pool->bodies) {
		kfree(pool);
		return NULL;
	}

	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->bodies);
		kfree(pool);
		return NULL;
	}

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	for (i = 0; i < num_bodies; ++i) {
		struct vsp1_dl_body *dlb = &pool->bodies[i];

		dlb->pool = pool;
		dlb->max_entries = num_entries;
		dlb->dma = pool->dma + i * dlb_size;
		dlb->entries = pool->mem + i * dlb_size;

		list_add_tail(&dlb->free, &pool->free);
	}

	return pool;
}
/**
 * vsp1_dl_body_pool_destroy - Release a body pool
 */
void vsp1_dl_body_pool_destroy(struct vsp1_dl_body_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->bodies);
	kfree(pool);
}
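/*
 * Illustrative usage sketch, not part of the driver: an entity that needs
 * per-frame register lists creates a pool at init time and destroys it on
 * teardown. The entity name and the sizes below are hypothetical.
 */
static int example_entity_init(struct vsp1_device *vsp1,
			       struct vsp1_dl_body_pool **pool)
{
	/* Three bodies of 128 entries each, with no extra per-body space. */
	*pool = vsp1_dl_body_pool_create(vsp1, 3, 128, 0);
	if (!*pool)
		return -ENOMEM;

	return 0;
}

static void example_entity_cleanup(struct vsp1_dl_body_pool *pool)
{
	/* Safe to call with a NULL pool. */
	vsp1_dl_body_pool_destroy(pool);
}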
/**
 * vsp1_dl_body_get - Obtain a body from a pool
 */
struct vsp1_dl_body *vsp1_dl_body_get(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		dlb = list_first_entry(&pool->free, struct vsp1_dl_body, free);
		list_del(&dlb->free);
		refcount_set(&dlb->refcnt, 1);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return dlb;
}

/**
 * vsp1_dl_body_put - Return a body back to its pool
 */
void vsp1_dl_body_put(struct vsp1_dl_body *dlb)
{
	unsigned long flags;

	if (!dlb)
		return;

	if (!refcount_dec_and_test(&dlb->refcnt))
		return;

	dlb->num_entries = 0;

	spin_lock_irqsave(&dlb->pool->lock, flags);
	list_add_tail(&dlb->free, &dlb->pool->free);
	spin_unlock_irqrestore(&dlb->pool->lock, flags);
}
/**
 * vsp1_dl_body_write - Write a register to a display list body
 */
void vsp1_dl_body_write(struct vsp1_dl_body *dlb, u32 reg, u32 data)
{
	if (WARN_ONCE(dlb->num_entries >= dlb->max_entries,
		      "DLB size exceeded (max %u)", dlb->max_entries))
		return;

	dlb->entries[dlb->num_entries].addr = reg;
	dlb->entries[dlb->num_entries].data = data;
	dlb->num_entries++;
}
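/*
 * Illustrative sketch, not part of the driver: obtain a body, record register
 * writes into it, and pass the reference on. The register offset below is a
 * hypothetical placeholder, not a real VSP1 register.
 */
static struct vsp1_dl_body *example_configure(struct vsp1_dl_body_pool *pool)
{
	struct vsp1_dl_body *dlb;

	dlb = vsp1_dl_body_get(pool);
	if (!dlb)
		return NULL;

	/*
	 * Writes are only recorded in memory here; the hardware applies them
	 * when it processes the display list referencing this body.
	 */
	vsp1_dl_body_write(dlb, 0x0100, 0x1);

	/* The reference taken by vsp1_dl_body_get() travels to the caller. */
	return dlb;
}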
/* -----------------------------------------------------------------------------
 * Display List Extended Command Management
 */
/**
 * vsp1_dl_cmd_pool_create - Create a pool of commands from a single allocation
 */
static struct vsp1_dl_cmd_pool *
vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
			unsigned int num_cmds)
{
	struct vsp1_dl_cmd_pool *pool;
	size_t cmd_size;
	unsigned int i;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return NULL;

	pool->vsp1 = vsp1;

	spin_lock_init(&pool->lock);
	INIT_LIST_HEAD(&pool->free);

	pool->cmds = kcalloc(num_cmds, sizeof(*pool->cmds), GFP_KERNEL);
	if (!pool->cmds) {
		kfree(pool);
		return NULL;
	}

	/* Each command body is followed by its data, aligned to 16 bytes. */
	cmd_size = sizeof(struct vsp1_pre_ext_dl_body) +
		   vsp1_extended_commands[type].body_size;
	cmd_size = ALIGN(cmd_size, 16);

	pool->size = cmd_size * num_cmds;
	pool->mem = dma_alloc_wc(vsp1->bus_master, pool->size, &pool->dma,
				 GFP_KERNEL);
	if (!pool->mem) {
		kfree(pool->cmds);
		kfree(pool);
		return NULL;
	}

	for (i = 0; i < num_cmds; ++i) {
		struct vsp1_dl_ext_cmd *cmd = &pool->cmds[i];
		size_t cmd_offset = i * cmd_size;
		size_t data_offset = sizeof(struct vsp1_pre_ext_dl_body) +
				     cmd_offset;

		cmd->pool = pool;
		cmd->opcode = vsp1_extended_commands[type].opcode;

		/*
		 * TODO: Auto-disp can utilise more than one extended body
		 * command per cmd.
		 */
		cmd->num_cmds = 1;
		cmd->cmds = pool->mem + cmd_offset;
		cmd->cmd_dma = pool->dma + cmd_offset;

		cmd->data = pool->mem + data_offset;
		cmd->data_dma = pool->dma + data_offset;

		list_add_tail(&cmd->free, &pool->free);
	}

	return pool;
}
static struct vsp1_dl_ext_cmd *
vsp1_dl_ext_cmd_get(struct vsp1_dl_cmd_pool *pool)
{
	struct vsp1_dl_ext_cmd *cmd = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);

	if (!list_empty(&pool->free)) {
		cmd = list_first_entry(&pool->free, struct vsp1_dl_ext_cmd,
				       free);
		list_del(&cmd->free);
	}

	spin_unlock_irqrestore(&pool->lock, flags);

	return cmd;
}

static void vsp1_dl_ext_cmd_put(struct vsp1_dl_ext_cmd *cmd)
{
	unsigned long flags;

	if (!cmd)
		return;

	/* Reset the flags, as they mark data usage. */
	cmd->flags = 0;

	spin_lock_irqsave(&cmd->pool->lock, flags);
	list_add_tail(&cmd->free, &cmd->pool->free);
	spin_unlock_irqrestore(&cmd->pool->lock, flags);
}

static void vsp1_dl_ext_cmd_pool_destroy(struct vsp1_dl_cmd_pool *pool)
{
	if (!pool)
		return;

	if (pool->mem)
		dma_free_wc(pool->vsp1->bus_master, pool->size, pool->mem,
			    pool->dma);

	kfree(pool->cmds);
	kfree(pool);
}
struct vsp1_dl_ext_cmd *vsp1_dl_get_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	if (dl->pre_cmd)
		return dl->pre_cmd;

	dl->pre_cmd = vsp1_dl_ext_cmd_get(dlm->cmdpool);

	return dl->pre_cmd;
}
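/*
 * Illustrative sketch, not part of the driver: request the pre-command for a
 * display list and fill its payload. The payload layout and the flag value
 * are hypothetical stand-ins for a real extended command.
 */
static int example_fill_pre_cmd(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_ext_cmd *cmd;
	u32 *body;

	cmd = vsp1_dl_get_pre_cmd(dl);
	if (!cmd)
		return -EBUSY;	/* all pre-allocated commands are in use */

	/*
	 * cmd->data points to the CPU mapping of the payload reserved at pool
	 * creation time; cmd->data_dma is the matching bus address that ends
	 * up in the command's address_set field.
	 */
	body = cmd->data;
	body[0] = 0;		/* hypothetical payload word */

	cmd->flags |= BIT(0);	/* hypothetical per-command flag */

	return 0;
}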
/* -----------------------------------------------------------------------------
 * Display List Transaction Management
 */

static struct vsp1_dl_list *vsp1_dl_list_alloc(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	size_t header_offset;

	dl = kzalloc(sizeof(*dl), GFP_KERNEL);
	if (!dl)
		return NULL;

	INIT_LIST_HEAD(&dl->bodies);
	dl->dlm = dlm;

	/* Get a default body for our list. */
	dl->body0 = vsp1_dl_body_get(dlm->pool);
	if (!dl->body0) {
		kfree(dl);
		return NULL;
	}

	/* The header is stored in the same memory, right after the entries. */
	header_offset = dl->body0->max_entries * sizeof(*dl->body0->entries);

	dl->header = ((void *)dl->body0->entries) + header_offset;
	dl->dma = dl->body0->dma + header_offset;

	memset(dl->header, 0, sizeof(*dl->header));
	dl->header->lists[0].addr = dl->body0->dma;

	return dl;
}

static void vsp1_dl_list_bodies_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_body *dlb, *tmp;

	list_for_each_entry_safe(dlb, tmp, &dl->bodies, list) {
		list_del(&dlb->list);
		vsp1_dl_body_put(dlb);
	}
}

static void vsp1_dl_list_free(struct vsp1_dl_list *dl)
{
	vsp1_dl_body_put(dl->body0);
	vsp1_dl_list_bodies_put(dl);

	kfree(dl);
}
/**
 * vsp1_dl_list_get - Get a free display list
 */
struct vsp1_dl_list *vsp1_dl_list_get(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	if (!list_empty(&dlm->free)) {
		dl = list_first_entry(&dlm->free, struct vsp1_dl_list, list);
		list_del(&dl->list);

		/*
		 * The chain must be initialised so that every display list can
		 * assert list_empty() when it is not part of a chain.
		 */
		INIT_LIST_HEAD(&dl->chain);
	}

	spin_unlock_irqrestore(&dlm->lock, flags);

	return dl;
}
/* This function must be called with the display list manager lock held. */
static void __vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_list *dl_next;

	if (!dl)
		return;

	/*
	 * Release any linked display-lists which were chained for a single
	 * hardware operation.
	 */
	if (dl->has_chain) {
		list_for_each_entry(dl_next, &dl->chain, chain)
			__vsp1_dl_list_put(dl_next);
	}

	dl->has_chain = false;

	vsp1_dl_list_bodies_put(dl);

	vsp1_dl_ext_cmd_put(dl->pre_cmd);
	vsp1_dl_ext_cmd_put(dl->post_cmd);

	dl->pre_cmd = NULL;
	dl->post_cmd = NULL;

	/*
	 * body0 is reused by the next user of this list, so reset its entry
	 * count instead of returning it to the pool.
	 */
	dl->body0->num_entries = 0;

	list_add_tail(&dl->list, &dl->dlm->free);
}
/**
 * vsp1_dl_list_put - Release a display list
 */
void vsp1_dl_list_put(struct vsp1_dl_list *dl)
{
	unsigned long flags;

	if (!dl)
		return;

	spin_lock_irqsave(&dl->dlm->lock, flags);
	__vsp1_dl_list_put(dl);
	spin_unlock_irqrestore(&dl->dlm->lock, flags);
}

/**
 * vsp1_dl_list_get_body0 - Obtain the default body for the display list
 */
struct vsp1_dl_body *vsp1_dl_list_get_body0(struct vsp1_dl_list *dl)
{
	return dl->body0;
}

/**
 * vsp1_dl_list_add_body - Add a body to the display list
 *
 * The body's reference count is incremented; the caller keeps its own
 * reference and must release it with vsp1_dl_body_put().
 */
int vsp1_dl_list_add_body(struct vsp1_dl_list *dl, struct vsp1_dl_body *dlb)
{
	refcount_inc(&dlb->refcnt);

	list_add_tail(&dlb->list, &dl->bodies);

	return 0;
}
/**
 * vsp1_dl_list_add_chain - Add a display list to a chain
 *
 * Add a display list to an existing chain. Chained lists are processed by the
 * hardware without CPU intervention, and a completion interrupt is only
 * triggered once the last display list in the chain has completed processing.
 */
int vsp1_dl_list_add_chain(struct vsp1_dl_list *head, struct vsp1_dl_list *dl)
{
	head->has_chain = true;
	list_add_tail(&dl->chain, &head->chain);

	return 0;
}
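/*
 * Illustrative sketch, not part of the driver: chaining per-partition display
 * lists so the hardware walks them without CPU intervention. Error unwinding
 * is trimmed and the partition count of 4 is a hypothetical example.
 */
static struct vsp1_dl_list *example_build_chain(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *head, *dl;
	unsigned int partition;

	head = vsp1_dl_list_get(dlm);
	if (!head)
		return NULL;

	for (partition = 1; partition < 4; ++partition) {
		dl = vsp1_dl_list_get(dlm);
		if (!dl)
			break;	/* a real caller would unwind here */

		vsp1_dl_list_add_chain(head, dl);
	}

	/* Committing the head later commits the whole chain. */
	return head;
}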
static void vsp1_dl_ext_cmd_fill_header(struct vsp1_dl_ext_cmd *cmd)
{
	cmd->cmds[0].opcode = cmd->opcode;
	cmd->cmds[0].flags = cmd->flags;
	cmd->cmds[0].address_set = cmd->data_dma;
	cmd->cmds[0].reserved = 0;
}
static void vsp1_dl_list_fill_header(struct vsp1_dl_list *dl, bool is_last)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_header_list *hdr = dl->header->lists;
	struct vsp1_dl_body *dlb;
	unsigned int num_lists = 0;

	/*
	 * Fill the header with the addresses and sizes of the display list
	 * bodies. The address of the first body was filled at allocation time.
	 */
	hdr->num_bytes = dl->body0->num_entries
		       * sizeof(*dl->header->lists);

	list_for_each_entry(dlb, &dl->bodies, list) {
		num_lists++;
		hdr++;

		hdr->addr = dlb->dma;
		hdr->num_bytes = dlb->num_entries
			       * sizeof(*dl->header->lists);
	}

	dl->header->num_lists = num_lists;
	dl->header->flags = 0;

	/*
	 * Enable the frame end interrupt for every list in continuous mode,
	 * but only for the last list in single-shot mode, where chained lists
	 * partition a single frame.
	 */
	if (!dlm->singleshot || is_last)
		dl->header->flags |= VSP1_DLH_INT_ENABLE;

	/*
	 * In continuous mode enable auto-start for all lists, as the VSP must
	 * keep processing lists without CPU intervention. In single-shot mode
	 * enable auto-start for all lists but the last to chain processing of
	 * multiple lists in a single operation.
	 */
	if (!dlm->singleshot || !is_last)
		dl->header->flags |= VSP1_DLH_AUTO_START;

	if (!is_last) {
		/*
		 * If this is not the last display list in the chain, queue the
		 * next item for automatic processing by the hardware.
		 */
		struct vsp1_dl_list *next = list_next_entry(dl, chain);

		dl->header->next_header = next->dma;
	} else if (!dlm->singleshot) {
		/*
		 * In continuous mode the hardware must loop over the same
		 * display list until instructed to do otherwise.
		 */
		dl->header->next_header = dl->dma;
	}

	if (!dl->extension)
		return;

	dl->extension->flags = 0;

	if (dl->pre_cmd) {
		dl->extension->pre_ext_dl_plist = dl->pre_cmd->cmd_dma;
		dl->extension->pre_ext_dl_num_cmd = dl->pre_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_PRE_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->pre_cmd);
	}

	if (dl->post_cmd) {
		dl->extension->post_ext_dl_plist = dl->post_cmd->cmd_dma;
		dl->extension->post_ext_dl_num_cmd = dl->post_cmd->num_cmds;
		dl->extension->flags |= VSP1_DLH_EXT_POST_CMD_EXEC;

		vsp1_dl_ext_cmd_fill_header(dl->post_cmd);
	}
}
static bool vsp1_dl_list_hw_update_pending(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;

	if (!dlm->queued)
		return false;

	/*
	 * The hardware keeps the UPDHDR bit set in the CMD register as long
	 * as the queued display list header has not been fetched.
	 */
	return !!(vsp1_read(vsp1, VI6_CMD(dlm->index)) & VI6_CMD_UPDHDR);
}

static void vsp1_dl_list_hw_enqueue(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_device *vsp1 = dlm->vsp1;

	/*
	 * Program the display list header address. If the hardware is idle
	 * (single-shot mode or first frame in continuous mode) it will then be
	 * started independently. If the hardware is operating, the
	 * VI6_DL_HDR_REF_ADDR register will be updated with the display list
	 * address.
	 */
	vsp1_write(vsp1, VI6_DL_HDR_ADDR(dlm->index), dl->dma);
}
static void vsp1_dl_list_commit_continuous(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * If a previous display list has been queued to the hardware but not
	 * processed yet, the VSP can start processing it at any time. In that
	 * case we can't replace the queued list with the new one, as we could
	 * race with the hardware. We thus mark the update as pending; it will
	 * be queued to the hardware by the frame end interrupt handler.
	 *
	 * If a display list is already pending, simply drop it, as the new
	 * list carries a more recent configuration. A pending list with the
	 * VSP1_DL_FRAME_END_INTERNAL flag set should never be dropped, as a
	 * process is waiting for it to complete; warn if that happens.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm)) {
		WARN_ON(dlm->pending &&
			(dlm->pending->flags & VSP1_DL_FRAME_END_INTERNAL));
		__vsp1_dl_list_put(dlm->pending);
		dlm->pending = dl;
		return;
	}

	/*
	 * Pass the new display list to the hardware and mark it as queued. It
	 * will become active when the hardware starts processing it.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	__vsp1_dl_list_put(dlm->queued);
	dlm->queued = dl;
}
static void vsp1_dl_list_commit_singleshot(struct vsp1_dl_list *dl)
{
	struct vsp1_dl_manager *dlm = dl->dlm;

	/*
	 * When working in single-shot mode, the caller guarantees that the
	 * hardware is idle at this point. Just commit the head display list
	 * to hardware. Chained lists will be started automatically.
	 */
	vsp1_dl_list_hw_enqueue(dl);

	dlm->active = dl;
}

void vsp1_dl_list_commit(struct vsp1_dl_list *dl, unsigned int dl_flags)
{
	struct vsp1_dl_manager *dlm = dl->dlm;
	struct vsp1_dl_list *dl_next;
	unsigned long flags;

	/* Fill the header for the head and chained display lists. */
	vsp1_dl_list_fill_header(dl, list_empty(&dl->chain));

	list_for_each_entry(dl_next, &dl->chain, chain) {
		bool last = list_is_last(&dl_next->chain, &dl->chain);

		vsp1_dl_list_fill_header(dl_next, last);
	}

	dl->flags = dl_flags & ~VSP1_DL_FRAME_END_COMPLETED;

	spin_lock_irqsave(&dlm->lock, flags);

	if (dlm->singleshot)
		vsp1_dl_list_commit_singleshot(dl);
	else
		vsp1_dl_list_commit_continuous(dl);

	spin_unlock_irqrestore(&dlm->lock, flags);
}
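/*
 * Illustrative sketch, not part of the driver: the usual get/configure/commit
 * cycle for one frame. The register write is a hypothetical placeholder.
 */
static int example_commit_frame(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl;
	struct vsp1_dl_body *dlb;

	dl = vsp1_dl_list_get(dlm);
	if (!dl)
		return -EBUSY;

	dlb = vsp1_dl_list_get_body0(dl);
	vsp1_dl_body_write(dlb, 0x0100, 0x1);	/* hypothetical register */

	/* Ownership of the list passes to the manager on commit. */
	vsp1_dl_list_commit(dl, 0);

	return 0;
}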
/* -----------------------------------------------------------------------------
 * Display List Manager
 */
/**
 * vsp1_dlm_irq_frame_end - Display list handler for the frame end interrupt
 *
 * Return a set of flags that indicates display list completion status.
 *
 * The VSP1_DL_FRAME_END_COMPLETED flag indicates that the previous display
 * list has completed at frame end. If the flag is not returned, display list
 * completion has been delayed by one frame because the commit raced with the
 * frame end interrupt. The function always returns with the flag
 * set in single-shot mode as display list processing is then not continuous
 * and races never occur.
 *
 * The VSP1_DL_FRAME_END_WRITEBACK flag indicates that the previous display
 * list has completed and had been queued with the writeback flag.
 */
unsigned int vsp1_dlm_irq_frame_end(struct vsp1_dl_manager *dlm)
{
	struct vsp1_device *vsp1 = dlm->vsp1;
	u32 status = vsp1_read(vsp1, VI6_STATUS);
	unsigned int flags = 0;

	spin_lock(&dlm->lock);

	/*
	 * The mem-to-mem pipelines work in single-shot mode. No new display
	 * list can be queued, so there is nothing else to do.
	 */
	if (dlm->singleshot) {
		__vsp1_dl_list_put(dlm->active);
		dlm->active = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
		goto done;
	}

	/*
	 * If the commit operation raced with the interrupt and occurred after
	 * the frame end event but before interrupt processing, the hardware
	 * hasn't taken the update into account yet. Skip one frame and retry.
	 */
	if (vsp1_dl_list_hw_update_pending(dlm))
		goto done;

	/*
	 * Progressive streams report only TOP fields. A BOTTOM field means an
	 * interlaced stream whose frame completes on the next interrupt.
	 */
	if (status & VI6_STATUS_FLD_STD(dlm->index))
		goto done;

	/*
	 * If the active display list has the writeback flag set, this frame
	 * completion marks the end of the writeback capture. Report it and
	 * clear the flag on the list.
	 */
	if (dlm->active && (dlm->active->flags & VSP1_DL_FRAME_END_WRITEBACK)) {
		flags |= VSP1_DL_FRAME_END_WRITEBACK;
		dlm->active->flags &= ~VSP1_DL_FRAME_END_WRITEBACK;
	}

	/*
	 * The device starts processing the queued display list right after the
	 * frame end interrupt. The display list thus becomes active.
	 */
	if (dlm->queued) {
		if (dlm->queued->flags & VSP1_DL_FRAME_END_INTERNAL)
			flags |= VSP1_DL_FRAME_END_INTERNAL;
		dlm->queued->flags &= ~VSP1_DL_FRAME_END_INTERNAL;

		__vsp1_dl_list_put(dlm->active);
		dlm->active = dlm->queued;
		dlm->queued = NULL;
		flags |= VSP1_DL_FRAME_END_COMPLETED;
	}

	/*
	 * Now that the VSP has started processing the queued display list, we
	 * can queue the pending display list to the hardware if one has been
	 * prepared.
	 */
	if (dlm->pending) {
		vsp1_dl_list_hw_enqueue(dlm->pending);
		dlm->queued = dlm->pending;
		dlm->pending = NULL;
	}

done:
	spin_unlock(&dlm->lock);

	return flags;
}
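/*
 * Illustrative sketch, not part of the driver: an interrupt handler consuming
 * the returned completion flags. Both callbacks are hypothetical.
 */
static void example_writeback_complete(void)
{
	/* hand the writeback buffer back to its owner */
}

static void example_pipeline_frame_end(void)
{
	/* complete the current frame and kick the next one */
}

static void example_irq_handler(struct vsp1_dl_manager *dlm)
{
	unsigned int flags = vsp1_dlm_irq_frame_end(dlm);

	if (flags & VSP1_DL_FRAME_END_WRITEBACK)
		example_writeback_complete();

	/*
	 * Completion may be delayed by one frame when a commit races with the
	 * frame end interrupt, so only act when the flag is reported.
	 */
	if (flags & VSP1_DL_FRAME_END_COMPLETED)
		example_pipeline_frame_end();
}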
void vsp1_dlm_setup(struct vsp1_device *vsp1)
{
	unsigned int i;
	u32 ext_dl = (0x02 << VI6_DL_EXT_CTRL_POLINT_SHIFT)
		   | VI6_DL_EXT_CTRL_DLPRI | VI6_DL_EXT_CTRL_EXT;

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		for (i = 0; i < vsp1->info->wpf_count; ++i)
			vsp1_write(vsp1, VI6_DL_EXT_CTRL(i), ext_dl);
	}
}
void vsp1_dlm_reset(struct vsp1_dl_manager *dlm)
{
	unsigned long flags;

	spin_lock_irqsave(&dlm->lock, flags);

	__vsp1_dl_list_put(dlm->active);
	__vsp1_dl_list_put(dlm->queued);
	__vsp1_dl_list_put(dlm->pending);

	spin_unlock_irqrestore(&dlm->lock, flags);

	dlm->active = NULL;
	dlm->queued = NULL;
	dlm->pending = NULL;
}

struct vsp1_dl_body *vsp1_dlm_dl_body_get(struct vsp1_dl_manager *dlm)
{
	return vsp1_dl_body_get(dlm->pool);
}
struct vsp1_dl_manager *vsp1_dlm_create(struct vsp1_device *vsp1,
					unsigned int index,
					unsigned int prealloc)
{
	struct vsp1_dl_manager *dlm;
	size_t header_size;
	unsigned int i;

	dlm = devm_kzalloc(vsp1->dev, sizeof(*dlm), GFP_KERNEL);
	if (!dlm)
		return NULL;

	dlm->index = index;
	dlm->singleshot = vsp1->info->uapi;
	dlm->vsp1 = vsp1;

	spin_lock_init(&dlm->lock);
	INIT_LIST_HEAD(&dlm->free);

	/*
	 * The body and the header are allocated together to avoid memory
	 * fragmentation, with the header located right after the body in
	 * memory. An extra body is allocated on top of the prealloc to account
	 * for the cursor or writeback operations.
	 */
	header_size = vsp1_feature(vsp1, VSP1_HAS_EXT_DL)
		    ? sizeof(struct vsp1_dl_header_extended)
		    : sizeof(struct vsp1_dl_header);

	header_size = ALIGN(header_size, 8);

	dlm->pool = vsp1_dl_body_pool_create(vsp1, prealloc + 1,
					     VSP1_DL_NUM_ENTRIES, header_size);
	if (!dlm->pool)
		return NULL;

	for (i = 0; i < prealloc; ++i) {
		struct vsp1_dl_list *dl;

		dl = vsp1_dl_list_alloc(dlm);
		if (!dl) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}

		/* The extended header immediately follows the header. */
		if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL))
			dl->extension = (void *)dl->header
				      + sizeof(*dl->header);

		list_add_tail(&dl->list, &dlm->free);
	}

	if (vsp1_feature(vsp1, VSP1_HAS_EXT_DL)) {
		dlm->cmdpool = vsp1_dl_cmd_pool_create(vsp1,
					VSP1_EXTCMD_AUTOFLD, prealloc);
		if (!dlm->cmdpool) {
			vsp1_dlm_destroy(dlm);
			return NULL;
		}
	}

	return dlm;
}
void vsp1_dlm_destroy(struct vsp1_dl_manager *dlm)
{
	struct vsp1_dl_list *dl, *next;

	if (!dlm)
		return;

	list_for_each_entry_safe(dl, next, &dlm->free, list) {
		list_del(&dl->list);
		vsp1_dl_list_free(dl);
	}

	vsp1_dl_body_pool_destroy(dlm->pool);
	vsp1_dl_ext_cmd_pool_destroy(dlm->cmdpool);
}
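/*
 * Illustrative sketch, not part of the driver: per-WPF manager lifecycle as a
 * probe/remove path might drive it. The prealloc depth of 8 is an arbitrary
 * example value.
 */
static int example_wpf_init(struct vsp1_device *vsp1, unsigned int index,
			    struct vsp1_dl_manager **dlm)
{
	*dlm = vsp1_dlm_create(vsp1, index, 8);
	if (!*dlm)
		return -ENOMEM;

	return 0;
}

static void example_wpf_cleanup(struct vsp1_dl_manager *dlm)
{
	/* Drop any lists still tracked as active, queued or pending... */
	vsp1_dlm_reset(dlm);
	/* ...then release the preallocated lists and backing pools. */
	vsp1_dlm_destroy(dlm);
}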