Lines matching full:pipe (uses of "pipe" in the Lima DRM GPU scheduler, lima_sched.c)
22 struct lima_sched_pipe *pipe; member
64 return f->pipe->base.name; in lima_fence_get_timeline_name()
88 static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe) in lima_fence_create() argument
96 fence->pipe = pipe; in lima_fence_create()
97 dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock, in lima_fence_create()
98 pipe->fence_context, ++pipe->fence_seqno); in lima_fence_create()
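
The matches above cover the fence side of the scheduler: struct lima_fence carries a back-pointer to its pipe (used for the timeline name at line 64), and lima_fence_create() stamps every new fence with the pipe's shared fence context, lock and an incrementing seqno. A minimal sketch reconstructed from those lines; the allocation path is not in the matches, so plain kzalloc() is used as a stand-in, and the lima_fence_ops table is only referenced.

struct lima_fence {
        struct dma_fence base;
        struct lima_sched_pipe *pipe;     /* back-pointer, see lines 22 and 64 */
};

static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
        struct lima_fence *fence;

        /* Allocation details are not shown in the matches; kzalloc() is a stand-in. */
        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return NULL;

        fence->pipe = pipe;
        /* One fence context and one seqno counter per pipe (lines 96-98). */
        dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
                       pipe->fence_context, ++pipe->fence_seqno);

        return fence;
}
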
156 int lima_sched_context_init(struct lima_sched_pipe *pipe, in lima_sched_context_init() argument
159 struct drm_gpu_scheduler *sched = &pipe->base; in lima_sched_context_init()
165 void lima_sched_context_fini(struct lima_sched_pipe *pipe, in lima_sched_context_fini() argument
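
lima_sched_context_init()/fini() (lines 156-165) only need the pipe to reach its embedded drm_gpu_scheduler, which becomes the single entry in the entity's scheduler list. A hedged sketch, assuming the context wraps a drm_sched_entity in a field named base; the priority and guilty arguments are not visible in the matches and are guesses here.

int lima_sched_context_init(struct lima_sched_pipe *pipe,
                            struct lima_sched_context *context)
{
        struct drm_gpu_scheduler *sched = &pipe->base;      /* line 159 */

        /* Priority and guilty pointer are assumptions, not shown in the matches. */
        return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
                                     &sched, 1, NULL);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
                             struct lima_sched_context *context)
{
        drm_sched_entity_fini(&context->base);
}
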
205 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_run_job() local
206 struct lima_device *ldev = pipe->ldev; in lima_sched_run_job()
214 fence = lima_fence_create(pipe); in lima_sched_run_job()
231 pipe->current_task = task; in lima_sched_run_job()
248 for (i = 0; i < pipe->num_l2_cache; i++) in lima_sched_run_job()
249 lima_l2_cache_flush(pipe->l2_cache[i]); in lima_sched_run_job()
251 lima_vm_put(pipe->current_vm); in lima_sched_run_job()
252 pipe->current_vm = lima_vm_get(task->vm); in lima_sched_run_job()
254 if (pipe->bcast_mmu) in lima_sched_run_job()
255 lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm); in lima_sched_run_job()
257 for (i = 0; i < pipe->num_mmu; i++) in lima_sched_run_job()
258 lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm); in lima_sched_run_job()
263 pipe->error = false; in lima_sched_run_job()
264 pipe->task_run(pipe, task); in lima_sched_run_job()
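
Taken together, the lima_sched_run_job() matches (lines 205-264) describe the per-job submission flow: create a hardware fence, record the task as the pipe's current one, flush every L2 cache, move the MMUs over to the task's VM (through the broadcast MMU when one exists, otherwise one MMU at a time), clear the error flag and kick the backend's task_run hook. A condensed sketch of that order; lines that do not mention "pipe" (early returns, power management, the fence bookkeeping on the task) are not reconstructed, and to_lima_task() is assumed from context.

static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);   /* line 205 */
        struct lima_fence *fence;
        int i;

        fence = lima_fence_create(pipe);                           /* line 214 */
        if (!fence)
                return NULL;

        pipe->current_task = task;                                 /* line 231 */

        /* Make sure no stale data is served to the new job. */
        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);

        /* Swap the pipe's VM reference for the task's VM. */
        lima_vm_put(pipe->current_vm);
        pipe->current_vm = lima_vm_get(task->vm);

        if (pipe->bcast_mmu)
                lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
        else
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);

        pipe->error = false;
        pipe->task_run(pipe, task);        /* GP or PP backend hook */

        return &fence->base;
}
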
272 struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched); in lima_sched_build_error_task_list() local
273 struct lima_ip *ip = pipe->processor[0]; in lima_sched_build_error_task_list()
299 size = sizeof(struct lima_dump_chunk) + pipe->frame_size; in lima_sched_build_error_task_list()
332 chunk->size = pipe->frame_size; in lima_sched_build_error_task_list()
333 memcpy(chunk + 1, task->frame, pipe->frame_size); in lima_sched_build_error_task_list()
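
lima_sched_build_error_task_list() (lines 272-333) snapshots a faulting task for the error dump. The matches show a header-plus-payload idiom: a chunk is sized as its header plus pipe->frame_size bytes, and the frame is copied directly behind the header ("chunk + 1"). A schematic illustration of just that idiom; the struct below is a stand-in for lima_dump_chunk (only the size field appears in the matches) and append_frame_chunk() is a hypothetical helper.

struct example_dump_chunk {            /* stand-in for struct lima_dump_chunk */
        u32 id;                        /* assumed field */
        u32 size;                      /* set to pipe->frame_size at line 332 */
};

/* Hypothetical helper showing the layout used at lines 299-333. */
static void *append_frame_chunk(struct lima_sched_pipe *pipe,
                                struct lima_sched_task *task, void *buf)
{
        struct example_dump_chunk *chunk = buf;

        chunk->size = pipe->frame_size;
        memcpy(chunk + 1, task->frame, pipe->frame_size);

        /* Advance past header + payload, matching
         * size = sizeof(struct lima_dump_chunk) + pipe->frame_size (line 299). */
        return (char *)(chunk + 1) + pipe->frame_size;
}
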
401 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_timedout_job() local
403 struct lima_device *ldev = pipe->ldev; in lima_sched_timedout_job()
404 struct lima_ip *ip = pipe->processor[0]; in lima_sched_timedout_job()
423 for (i = 0; i < pipe->num_processor; i++) in lima_sched_timedout_job()
424 synchronize_irq(pipe->processor[i]->irq); in lima_sched_timedout_job()
425 if (pipe->bcast_processor) in lima_sched_timedout_job()
426 synchronize_irq(pipe->bcast_processor->irq); in lima_sched_timedout_job()
438 pipe->task_mask_irq(pipe); in lima_sched_timedout_job()
440 if (!pipe->error) in lima_sched_timedout_job()
443 drm_sched_stop(&pipe->base, &task->base); in lima_sched_timedout_job()
450 pipe->task_error(pipe); in lima_sched_timedout_job()
452 if (pipe->bcast_mmu) in lima_sched_timedout_job()
453 lima_mmu_page_fault_resume(pipe->bcast_mmu); in lima_sched_timedout_job()
455 for (i = 0; i < pipe->num_mmu; i++) in lima_sched_timedout_job()
456 lima_mmu_page_fault_resume(pipe->mmu[i]); in lima_sched_timedout_job()
459 lima_vm_put(pipe->current_vm); in lima_sched_timedout_job()
460 pipe->current_vm = NULL; in lima_sched_timedout_job()
461 pipe->current_task = NULL; in lima_sched_timedout_job()
465 drm_sched_resubmit_jobs(&pipe->base); in lima_sched_timedout_job()
466 drm_sched_start(&pipe->base); in lima_sched_timedout_job()
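
The lima_sched_timedout_job() matches (lines 401-466) lay out the hang-recovery sequence: synchronize the processor IRQ handlers (including the broadcast unit) so a completion that is merely late is not treated as a hang, mask the task IRQs, report the timeout when the hardware did not flag an error itself, stop the scheduler, let the backend's task_error hook reset the core, resume any pending MMU page faults, drop the current VM and task, then resubmit the queue and restart the scheduler. A condensed sketch of that order; the lines between the matches are not reconstructed and the log message is an assumption.

static enum drm_gpu_sched_stat lima_sched_timedout_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);   /* line 401 */
        struct lima_ip *ip = pipe->processor[0];                   /* line 404 */
        int i;

        /* Flush in-flight IRQ handlers before declaring the job dead. */
        for (i = 0; i < pipe->num_processor; i++)
                synchronize_irq(pipe->processor[i]->irq);
        if (pipe->bcast_processor)
                synchronize_irq(pipe->bcast_processor->irq);

        /* Keep the core quiet while the job is torn down. */
        pipe->task_mask_irq(pipe);

        if (!pipe->error)
                DRM_ERROR("%s job timeout\n", lima_ip_name(ip));   /* message is an assumption */

        drm_sched_stop(&pipe->base, &task->base);

        pipe->task_error(pipe);                 /* backend reset hook */

        if (pipe->bcast_mmu)
                lima_mmu_page_fault_resume(pipe->bcast_mmu);
        else
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_page_fault_resume(pipe->mmu[i]);

        lima_vm_put(pipe->current_vm);
        pipe->current_vm = NULL;
        pipe->current_task = NULL;

        drm_sched_resubmit_jobs(&pipe->base);
        drm_sched_start(&pipe->base);

        return DRM_GPU_SCHED_STAT_NOMINAL;
}
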
474 struct lima_sched_pipe *pipe = to_lima_pipe(job->sched); in lima_sched_free_job() local
485 kmem_cache_free(pipe->task_slab, task); in lima_sched_free_job()
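
lima_sched_free_job() (lines 474-485) returns the finished task to the pipe's dedicated slab cache. A small sketch; the per-task cleanup (dropping buffer, VM and fence references) lives in the elided lines and is only marked by a comment.

static void lima_sched_free_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);   /* line 474 */

        /* ... drop the task's BO/VM/fence references (not shown in the matches) ... */

        drm_sched_job_cleanup(job);
        kmem_cache_free(pipe->task_slab, task);                    /* line 485 */
}
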
496 struct lima_sched_pipe *pipe = in lima_sched_recover_work() local
500 for (i = 0; i < pipe->num_l2_cache; i++) in lima_sched_recover_work()
501 lima_l2_cache_flush(pipe->l2_cache[i]); in lima_sched_recover_work()
503 if (pipe->bcast_mmu) { in lima_sched_recover_work()
504 lima_mmu_flush_tlb(pipe->bcast_mmu); in lima_sched_recover_work()
506 for (i = 0; i < pipe->num_mmu; i++) in lima_sched_recover_work()
507 lima_mmu_flush_tlb(pipe->mmu[i]); in lima_sched_recover_work()
510 if (pipe->task_recover(pipe)) in lima_sched_recover_work()
511 drm_sched_fault(&pipe->base); in lima_sched_recover_work()
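
lima_sched_recover_work() (lines 496-511) is the soft-recovery path queued from the completion handler: flush the L2 caches, invalidate the MMU TLBs (the broadcast MMU alone when present), then ask the backend to re-run the task via task_recover; if that fails, escalate with drm_sched_fault() so the regular timeout handling takes over. A sketch assembled from those matches; the container_of() line continues outside the matches and is reconstructed here.

static void lima_sched_recover_work(struct work_struct *work)
{
        struct lima_sched_pipe *pipe =
                container_of(work, struct lima_sched_pipe, recover_work);
        int i;

        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);

        if (pipe->bcast_mmu) {
                lima_mmu_flush_tlb(pipe->bcast_mmu);
        } else {
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_flush_tlb(pipe->mmu[i]);
        }

        /* Let the GP/PP backend retry the task; fall back to the full
         * timeout/reset cycle if it cannot. */
        if (pipe->task_recover(pipe))
                drm_sched_fault(&pipe->base);
}
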
514 int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name) in lima_sched_pipe_init() argument
519 pipe->fence_context = dma_fence_context_alloc(1); in lima_sched_pipe_init()
520 spin_lock_init(&pipe->fence_lock); in lima_sched_pipe_init()
522 INIT_WORK(&pipe->recover_work, lima_sched_recover_work); in lima_sched_pipe_init()
524 return drm_sched_init(&pipe->base, &lima_sched_ops, NULL, in lima_sched_pipe_init()
529 NULL, name, pipe->ldev->dev); in lima_sched_pipe_init()
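
lima_sched_pipe_init() (lines 514-529) gives each pipe its own dma_fence context, fence spinlock and recovery work item before registering the embedded drm_gpu_scheduler. The drm_sched_init() call spans several source lines of which only the first and last contain "pipe", so its middle parameters are deliberately left as a gap in this sketch rather than guessed.

int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
        /* Per-pipe fence context; lima_fence_create() draws seqnos from it. */
        pipe->fence_context = dma_fence_context_alloc(1);
        spin_lock_init(&pipe->fence_lock);

        INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

        return drm_sched_init(&pipe->base, &lima_sched_ops, NULL,
                              /* ... parameters elided in the listing ... */
                              NULL, name, pipe->ldev->dev);
}
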
532 void lima_sched_pipe_fini(struct lima_sched_pipe *pipe) in lima_sched_pipe_fini() argument
534 drm_sched_fini(&pipe->base); in lima_sched_pipe_fini()
537 void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe) in lima_sched_pipe_task_done() argument
539 struct lima_sched_task *task = pipe->current_task; in lima_sched_pipe_task_done()
540 struct lima_device *ldev = pipe->ldev; in lima_sched_pipe_task_done()
542 if (pipe->error) { in lima_sched_pipe_task_done()
544 schedule_work(&pipe->recover_work); in lima_sched_pipe_task_done()
546 drm_sched_fault(&pipe->base); in lima_sched_pipe_task_done()
548 pipe->task_fini(pipe); in lima_sched_pipe_task_done()
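
Finally, lima_sched_pipe_task_done() (lines 537-548) is the completion-side branch point: on error it either queues the soft-recovery work or raises drm_sched_fault() to force the timeout path, otherwise it lets the backend finish the task through task_fini. A sketch of that branch; the recoverability test and the success-path fence signalling are assumptions about the lines that do not mention "pipe".

void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
        struct lima_sched_task *task = pipe->current_task;

        if (pipe->error) {
                /* Whether a retry is possible is decided in elided lines;
                 * a per-task flag is assumed here. */
                if (task && task->recoverable)
                        schedule_work(&pipe->recover_work);
                else
                        drm_sched_fault(&pipe->base);
        } else {
                pipe->task_fini(pipe);
                dma_fence_signal(task->fence);    /* assumed success-path step */
        }
}
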