/*
 * Search hits matching "sub", "-" and "components" in the MediaTek MDP3
 * CMDQ driver (mtk-mdp3-cmdq.c).  The excerpts below are non-contiguous
 * and are grouped by the function they were matched in.
 */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Author: Ping-Hsun Wu <ping-hsun.wu@mediatek.com>
 */
#include "mtk-mdp3-cfg.h"
#include "mtk-mdp3-cmdq.h"
#include "mtk-mdp3-comp.h"
#include "mtk-mdp3-core.h"
#include "mtk-mdp3-m2m.h"
#include "mtk-img-ipi.h"
#define has_op(ctx, op) \
        ((ctx)->comp->ops && (ctx)->comp->ops->op)
#define call_op(ctx, op, ...) \
        (has_op(ctx, op) ? (ctx)->comp->ops->op(ctx, ##__VA_ARGS__) : 0)
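/*
 * Illustrative sketch (not part of the driver; demo_* names are made up):
 * the has_op()/call_op() pair above guards every optional component
 * callback, so a component that does not implement an op is treated as a
 * successful no-op (0) instead of a NULL dereference.  The same idea in a
 * stand-alone program:
 */
#include <stdio.h>

struct demo_ops {
        int (*init)(int arg);           /* optional callback, may be NULL */
};

struct demo_ctx {
        const struct demo_ops *ops;
};

#define demo_has_op(ctx, op) \
        ((ctx)->ops && (ctx)->ops->op)
#define demo_call_op(ctx, op, ...) \
        (demo_has_op(ctx, op) ? (ctx)->ops->op(__VA_ARGS__) : 0)

static int demo_init(int arg)
{
        printf("init %d\n", arg);
        return 0;
}

int main(void)
{
        const struct demo_ops ops = { .init = demo_init };
        struct demo_ctx with = { .ops = &ops };
        struct demo_ctx without = { .ops = NULL };
        int ret;

        ret = demo_call_op(&with, init, 42);            /* calls demo_init(42) */
        ret |= demo_call_op(&without, init, 42);        /* no init op: yields 0 */
        return ret;
}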
/* __get_mutex() */
        return mdp_dev->mm_subsys[p->sub_id].mdp_mutex[p->mutex_id];
/* __get_pipe() */
        dev_err(&mdp_dev->pdev->dev, "Unknown pipeline id %d", id);
/* __get_config_offset() */
        const int p_id = mdp->mdp_data->mdp_plat_id;
        struct device *dev = &mdp->pdev->dev;
        long bound = mdp->vpu.config_size;

        if (pp_idx >= mdp->mdp_data->pp_used)

        cfg_c = CFG_OFST(MT8183, param->config, pp_idx);
        cfg_c = CFG_OFST(MT8195, param->config, pp_idx);

        cfg_n = CFG_OFST(MT8183, param->config, pp_idx + 1);
        cfg_n = CFG_OFST(MT8195, param->config, pp_idx + 1);

        if ((long)cfg_n - (long)mdp->vpu.config > bound) {
                cfg_c = ERR_PTR(-EFAULT);

        cfg_c = ERR_PTR(-EINVAL);
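/*
 * Illustrative sketch (demo_* names are hypothetical, not driver API):
 * the bounds check above rejects a per-pipe config whose next entry would
 * end past the shared VPU config buffer, returning an error pointer
 * rather than an out-of-bounds address.  A stand-alone version of the
 * same check, with NULL standing in for ERR_PTR():
 */
#include <stdio.h>

struct demo_cfg { char payload[64]; };

static struct demo_cfg *demo_get_config(void *base, long bound,
                                        unsigned int nmemb, unsigned int pp_idx)
{
        struct demo_cfg *cfg_c, *cfg_n;

        if (pp_idx >= nmemb)
                return NULL;            /* -EINVAL in the driver */

        cfg_c = (struct demo_cfg *)base + pp_idx;
        cfg_n = cfg_c + 1;              /* end of the requested entry */
        if ((long)((char *)cfg_n - (char *)base) > bound)
                return NULL;            /* -EFAULT in the driver */

        return cfg_c;
}

int main(void)
{
        struct demo_cfg buf[2];

        /* Entry 1 fits exactly; entry 2 is rejected by the index check. */
        printf("%d %d\n", demo_get_config(buf, sizeof(buf), 2, 1) != NULL,
               demo_get_config(buf, sizeof(buf), 2, 2) != NULL);
        return 0;
}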
/* mdp_path_subfrm_require() */
        const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
        const struct mtk_mdp_driver_data *data = path->mdp_dev->mdp_data;

        num_comp = CFG_GET(MT8183, path->config, num_components);
        num_comp = CFG_GET(MT8195, path->config, num_components);

        index = __get_pipe(path->mdp_dev, path->comps[0].comp->public_id);
        memcpy(p, &data->pipe_info[index], sizeof(struct mdp_pipe_info));
        mutex = __get_mutex(path->mdp_dev, p);

        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))

        ctx = &path->comps[index];
        if (is_output_disabled(p_id, ctx->param, count))

        mutex_idx = data->mdp_mutex_table_idx;
        id = ctx->comp->public_id;

        b = &data->comp_data[id].blend;
        if (b && b->aid_mod)
                mtk_mutex_write_mod(mutex, mutex_idx[b->b_id], false);
/* mdp_path_subfrm_run() */
        const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;
        struct device *dev = &path->mdp_dev->pdev->dev;

        if (-1 == p->mutex_id) {
                return -EINVAL;

        num_comp = CFG_GET(MT8183, path->config, num_components);
        num_comp = CFG_GET(MT8195, path->config, num_components);

        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
        ctx = &path->comps[index];
        if (is_output_disabled(p_id, ctx->param, count))
        event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];

        mutex = __get_mutex(path->mdp_dev, p);
        mtk_mutex_enable_by_cmdq(mutex, (void *)&cmd->pkt);

        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
        ctx = &path->comps[index];
        if (is_output_disabled(p_id, ctx->param, count))
        event = ctx->comp->gce_event[MDP_GCE_EVENT_SOF];
/* mdp_path_ctx_init() */
        const int p_id = mdp->mdp_data->mdp_plat_id;

        num_comp = CFG_GET(MT8183, path->config, num_components);
        num_comp = CFG_GET(MT8195, path->config, num_components);

        return -EINVAL;

        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))

        param = (void *)CFG_ADDR(MT8183, path->config, components[index]);
        param = (void *)CFG_ADDR(MT8195, path->config, components[index]);
        ret = mdp_comp_ctx_config(mdp, &path->comps[index],
                                  param, path->param);
/* mdp_path_config_subfrm() */
        const int p_id = path->mdp_dev->mdp_data->mdp_plat_id;

        num_comp = CFG_GET(MT8183, path->config, num_components);
        num_comp = CFG_GET(MT8195, path->config, num_components);

        ctrl = CFG_ADDR(MT8183, path->config, ctrls[count]);
        ctrl = CFG_ADDR(MT8195, path->config, ctrls[count]);

        /* Acquire components */

        for (index = 0; index < ctrl->num_sets; index++) {
                set = &ctrl->sets[index];
                cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
                                    set->value, 0xFFFFFFFF);

        /* Config sub-frame information */
        for (index = (num_comp - 1); index >= 0; index--) {
                inner_id = CFG_GET(MT8183, path->config, components[index].type);
                inner_id = CFG_GET(MT8195, path->config, components[index].type);
                if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
                ctx = &path->comps[index];
                if (is_output_disabled(p_id, ctx->param, count))

        /* Run components */

        /* Wait components done */
        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
        ctx = &path->comps[index];
        if (is_output_disabled(p_id, ctx->param, count))

        /* Advance to the next sub-frame */
        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
        ctx = &path->comps[index];

        for (index = 0; index < ctrl->num_sets; index++) {
                set = &ctrl->sets[index];
                cmdq_pkt_write_mask(&cmd->pkt, set->subsys_id, set->reg,
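/*
 * Illustrative sketch (demo_* names are hypothetical): the sub-frame
 * stages above walk the component list several times — acquire, config,
 * run, wait — skipping dummy components and components whose output is
 * disabled for this sub-frame, with the config pass running downstream
 * components first (the reverse loop).  A stand-alone loop with the same
 * skip logic:
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_comp {
        const char *name;
        bool is_dummy;
        bool output_disabled;
};

static void demo_config_subfrm(const struct demo_comp *comps, int num_comp,
                               int count)
{
        int index;

        /* Downstream components first, as in the driver's reverse loop. */
        for (index = num_comp - 1; index >= 0; index--) {
                if (comps[index].is_dummy)
                        continue;
                if (comps[index].output_disabled)
                        continue;
                printf("sub-frame %d: config %s\n", count, comps[index].name);
        }
}

int main(void)
{
        const struct demo_comp comps[] = {
                { "rdma",  false, false },
                { "dummy", true,  false },
                { "wrot",  false, false },
        };

        demo_config_subfrm(comps, 3, 0);
        return 0;
}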
/* mdp_path_config() */
        const int p_id = mdp->mdp_data->mdp_plat_id;

        num_comp = CFG_GET(MT8183, path->config, num_components);
        num_comp = CFG_GET(MT8195, path->config, num_components);

        num_sub = CFG_GET(MT8183, path->config, num_subfrms);
        num_sub = CFG_GET(MT8195, path->config, num_subfrms);

        /* Reset components */
        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
        ctx = &path->comps[index];

        ctx = &path->comps[index];
        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))

        out = CFG_COMP(MT8183, ctx->param, outputs[0]);
        out = CFG_COMP(MT8195, ctx->param, outputs[0]);
        compose = path->composes[out];

        /* Config path sub-frames */
        inner_id = CFG_GET(MT8183, path->config, components[index].type);
        inner_id = CFG_GET(MT8195, path->config, components[index].type);
        if (mdp_cfg_comp_is_dummy(path->mdp_dev, inner_id))
        ctx = &path->comps[index];
/* mdp_cmdq_pkt_create() */
        pkt->va_base = kzalloc(size, GFP_KERNEL);
        if (!pkt->va_base)
                return -ENOMEM;

        pkt->buf_size = size;
        pkt->cl = (void *)client;

        dev = client->chan->mbox->dev;
        dma_addr = dma_map_single(dev, pkt->va_base, pkt->buf_size,

        kfree(pkt->va_base);
        return -ENOMEM;

        pkt->pa_base = dma_addr;
/* mdp_cmdq_pkt_destroy() */
        struct cmdq_client *client = (struct cmdq_client *)pkt->cl;

        dma_unmap_single(client->chan->mbox->dev, pkt->pa_base, pkt->buf_size,
        kfree(pkt->va_base);
        pkt->va_base = NULL;
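/*
 * Illustrative sketch (user-space stand-ins, not the driver API):
 * mdp_cmdq_pkt_create()/mdp_cmdq_pkt_destroy() above pair a CPU buffer
 * (kzalloc) with a DMA mapping (dma_map_single) and undo both in reverse
 * order on teardown.  Here calloc()/free() stand in for the allocation
 * and the "mapping" is only recorded, to show the paired lifecycle:
 */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

struct demo_pkt {
        void *va_base;          /* CPU-visible command buffer */
        uintptr_t pa_base;      /* stand-in for the DMA address */
        size_t buf_size;
};

static int demo_pkt_create(struct demo_pkt *pkt, size_t size)
{
        pkt->va_base = calloc(1, size);
        if (!pkt->va_base)
                return -ENOMEM;
        pkt->buf_size = size;
        pkt->pa_base = (uintptr_t)pkt->va_base;        /* "map" the buffer */
        return 0;
}

static void demo_pkt_destroy(struct demo_pkt *pkt)
{
        pkt->pa_base = 0;               /* "unmap" first ... */
        free(pkt->va_base);             /* ... then free the buffer */
        pkt->va_base = NULL;
}

int main(void)
{
        struct demo_pkt pkt;

        if (demo_pkt_create(&pkt, 16 * 1024))
                return 1;
        demo_pkt_destroy(&pkt);
        return 0;
}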
/* mdp_auto_release_work() */
        mdp = cmd->mdp;

        pipe_id = __get_pipe(mdp, cmd->comps[0].public_id);
        mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);

        mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
                            cmd->num_comps);

        if (refcount_dec_and_test(&mdp->job_count)) {
                if (cmd->mdp_ctx)
                        mdp_m2m_job_finish(cmd->mdp_ctx);

                if (cmd->user_cmdq_cb) {
                        user_cb_data.sta = cmd->data->sta;
                        user_cb_data.pkt = cmd->data->pkt;
                        cmd->user_cmdq_cb(user_cb_data);

                wake_up(&mdp->callback_wq);

        mdp_cmdq_pkt_destroy(&cmd->pkt);
        kfree(cmd->comps);
        cmd->comps = NULL;
/* mdp_handle_cmdq_callback() */
        cmd = container_of(data->pkt, struct mdp_cmdq_cmd, pkt);
        cmd->data = data;
        mdp = cmd->mdp;
        dev = &mdp->pdev->dev;

        INIT_WORK(&cmd->auto_release_work, mdp_auto_release_work);
        if (!queue_work(mdp->clock_wq, &cmd->auto_release_work)) {

                pipe_id = __get_pipe(mdp, cmd->comps[0].public_id);
                mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);

                mdp_comp_clocks_off(&mdp->pdev->dev, cmd->comps,
                                    cmd->num_comps);

                if (refcount_dec_and_test(&mdp->job_count))
                        wake_up(&mdp->callback_wq);

                mdp_cmdq_pkt_destroy(&cmd->pkt);
                kfree(cmd->comps);
                cmd->comps = NULL;
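/*
 * Illustrative sketch (demo_* names are hypothetical): the callback above
 * tries to push the heavy release path (clock gating, packet teardown) to
 * a workqueue; if queue_work() refuses the work item it falls back to the
 * same release inline so nothing leaks.  The control flow in plain C:
 */
#include <stdbool.h>
#include <stdio.h>

static void demo_release(const char *who)
{
        printf("release done by %s\n", who);
}

static bool demo_try_defer(void (*fn)(const char *))
{
        (void)fn;
        return false;   /* pretend the deferred context is unavailable */
}

static void demo_completion_callback(void)
{
        if (!demo_try_defer(demo_release))
                demo_release("callback (inline fallback)");
}

int main(void)
{
        demo_completion_callback();
        return 0;
}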
/* mdp_cmdq_prepare() */
        struct device *dev = &mdp->pdev->dev;
        const int p_id = mdp->mdp_data->mdp_plat_id;
        int i, ret = -ECANCELED;

        ret = -ENOMEM;

        ret = mdp_cmdq_pkt_create(mdp->cmdq_clt[pp_idx], &cmd->pkt, SZ_16K);

        num_comp = CFG_GET(MT8183, param->config, num_components);
        num_comp = CFG_GET(MT8195, param->config, num_components);

        ret = -EINVAL;

        ret = -ENOMEM;

        ret = -ENOMEM;

        path->mdp_dev = mdp;
        path->config = config;
        path->param = param->param;
        for (i = 0; i < param->param->num_outputs; i++) {
                path->bounds[i].left = 0;
                path->bounds[i].top = 0;
                path->bounds[i].width =
                        param->param->outputs[i].buffer.format.width;
                path->bounds[i].height =
                        param->param->outputs[i].buffer.format.height;
                path->composes[i] = param->composes[i] ?
                                    param->composes[i] : &path->bounds[i];

        pipe_id = __get_pipe(mdp, path->comps[0].comp->public_id);
        mutex = __get_mutex(mdp, &mdp->mdp_data->pipe_info[pipe_id]);

        cmdq_pkt_finalize(&cmd->pkt);

        inner_id = CFG_GET(MT8183, path->config, components[i].type);
        inner_id = CFG_GET(MT8195, path->config, components[i].type);

        memcpy(&comps[i], path->comps[i].comp,

        mdp->cmdq_clt[pp_idx]->client.rx_callback = mdp_handle_cmdq_callback;
        cmd->mdp = mdp;
        cmd->user_cmdq_cb = param->cmdq_cb;
        cmd->user_cb_data = param->cb_data;
        cmd->comps = comps;
        cmd->num_comps = num_comp;
        cmd->mdp_ctx = param->mdp_ctx;

        mdp_cmdq_pkt_destroy(&cmd->pkt);
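/*
 * Illustrative sketch (hypothetical labels and demo_* names; the excerpt
 * above does not show the driver's actual error labels): mdp_cmdq_prepare()
 * acquires several resources in sequence (command struct, CMDQ packet,
 * component array, path), and the excerpted error lines (ret = -ENOMEM /
 * -EINVAL, mdp_cmdq_pkt_destroy()) fit the usual kernel pattern of
 * unwinding those acquisitions in reverse order:
 */
#include <errno.h>
#include <stdlib.h>

struct demo_prep {
        void *cmd;
        void *pkt;
        void *comps;
};

static int demo_prepare(struct demo_prep *p, size_t n)
{
        int ret;

        p->cmd = calloc(1, 64);
        if (!p->cmd)
                return -ENOMEM;

        p->pkt = calloc(1, 16 * 1024);
        if (!p->pkt) {
                ret = -ENOMEM;
                goto err_free_cmd;
        }

        p->comps = calloc(n, 32);
        if (!p->comps) {
                ret = -ENOMEM;
                goto err_destroy_pkt;
        }
        return 0;

err_destroy_pkt:
        free(p->pkt);           /* mdp_cmdq_pkt_destroy() in the driver */
err_free_cmd:
        free(p->cmd);
        return ret;
}

int main(void)
{
        struct demo_prep p;
        int ret = demo_prepare(&p, 4);

        if (!ret) {
                free(p.comps);
                free(p.pkt);
                free(p.cmd);
        }
        return ret ? 1 : 0;
}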
/* mdp_cmdq_send() */
        struct device *dev = &mdp->pdev->dev;
        u8 pp_used = __get_pp_num(param->param->type);

        refcount_set(&mdp->job_count, pp_used);
        if (atomic_read(&mdp->suspended)) {
                refcount_set(&mdp->job_count, 0);
                return -ECANCELED;

        ret = mdp_comp_clocks_on(&mdp->pdev->dev, cmd[i]->comps, cmd[i]->num_comps);

        dma_sync_single_for_device(mdp->cmdq_clt[i]->chan->mbox->dev,
                                   cmd[i]->pkt.pa_base, cmd[i]->pkt.cmd_buf_size,

        ret = mbox_send_message(mdp->cmdq_clt[i]->chan, &cmd[i]->pkt);

        mbox_client_txdone(mdp->cmdq_clt[i]->chan, 0);

        while (--i >= 0)
                mdp_comp_clocks_off(&mdp->pdev->dev, cmd[i]->comps,
                                    cmd[i]->num_comps);

        refcount_set(&mdp->job_count, 0);
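/*
 * Illustrative sketch (demo_* names are hypothetical): the `while (--i >= 0)`
 * unwind above rolls back only the pipes whose clocks were already enabled
 * when a later pipe failed, then clears the job count so new jobs are not
 * blocked.  The same partial-rollback pattern in a stand-alone program:
 */
#include <stdbool.h>
#include <stdio.h>

#define DEMO_PIPES 2

static bool demo_clocks_on(int i)
{
        if (i == 1)             /* pretend the second pipe fails */
                return false;
        printf("clocks on  pipe %d\n", i);
        return true;
}

static void demo_clocks_off(int i)
{
        printf("clocks off pipe %d\n", i);
}

static int demo_send(void)
{
        int i;

        for (i = 0; i < DEMO_PIPES; i++)
                if (!demo_clocks_on(i))
                        goto err_clock_off;
        return 0;

err_clock_off:
        while (--i >= 0)        /* undo only what actually succeeded */
                demo_clocks_off(i);
        return -1;
}

int main(void)
{
        return demo_send() ? 1 : 0;
}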