Lines Matching full:queue
86 WARN(1, "Invalid queue type"); in get_ctx_state_size()
116 pvr_context_put(fence->queue->ctx); in pvr_queue_fence_release()
125 switch (fence->queue->type) { in pvr_queue_job_fence_get_timeline_name()
139 WARN(1, "Invalid queue type"); in pvr_queue_job_fence_get_timeline_name()
148 switch (fence->queue->type) { in pvr_queue_cccb_fence_get_timeline_name()
162 WARN(1, "Invalid queue type"); in pvr_queue_cccb_fence_get_timeline_name()
254 * @queue: The queue this fence belongs to.
259 * pvr_queue_fence::queue field too.
263 struct pvr_queue *queue, in pvr_queue_fence_init() argument
269 pvr_context_get(queue->ctx); in pvr_queue_fence_init()
270 fence->queue = queue; in pvr_queue_fence_init()
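
The matches above show the ownership rule spelled out in the pvr_queue_fence_init() kernel-doc: the fence takes a reference on the queue's context (pvr_context_get()) when it stores the queue pointer, and pvr_queue_fence_release() drops it when the fence dies. A minimal userspace sketch of that pin-while-the-fence-lives pattern; the ctx/queue/fence types and the ctx_get()/ctx_put() helpers are illustrative stand-ins, not the driver's API:

    #include <assert.h>
    #include <stdlib.h>

    struct ctx { int refcount; };
    struct queue { struct ctx *ctx; };
    struct fence { struct queue *queue; };

    static void ctx_get(struct ctx *c) { c->refcount++; }

    static void ctx_put(struct ctx *c)
    {
        if (--c->refcount == 0)
            free(c);
    }

    /* Models pvr_queue_fence_init(): storing the queue pointer pins the
     * context so a late-signalled fence can still reach fence->queue->ctx. */
    static void fence_init(struct fence *f, struct queue *q)
    {
        ctx_get(q->ctx);
        f->queue = q;
    }

    /* Models pvr_queue_fence_release(): drop the pinned context. */
    static void fence_release(struct fence *f)
    {
        ctx_put(f->queue->ctx);
        f->queue = NULL;
    }

    int main(void)
    {
        struct ctx *c = malloc(sizeof(*c));
        struct queue q = { .ctx = c };
        struct fence f;

        c->refcount = 1;
        fence_init(&f, &q);
        assert(c->refcount == 2);   /* fence holds its own reference */
        fence_release(&f);
        assert(c->refcount == 1);
        ctx_put(c);                 /* last reference, ctx is freed */
        return 0;
    }
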
279 * @queue: The queue this fence belongs to.
287 pvr_queue_cccb_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_cccb_fence_init() argument
289 pvr_queue_fence_init(fence, queue, &pvr_queue_cccb_fence_ops, in pvr_queue_cccb_fence_init()
290 &queue->cccb_fence_ctx.base); in pvr_queue_cccb_fence_init()
296 * @queue: The queue this fence belongs to.
305 pvr_queue_job_fence_init(struct dma_fence *fence, struct pvr_queue *queue) in pvr_queue_job_fence_init() argument
307 pvr_queue_fence_init(fence, queue, &pvr_queue_job_fence_ops, in pvr_queue_job_fence_init()
308 &queue->job_fence_ctx); in pvr_queue_job_fence_init()
312 * pvr_queue_fence_ctx_init() - Queue fence context initialization.
377 * @queue: The queue this job will be submitted to.
388 pvr_queue_get_job_cccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_cccb_fence() argument
399 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
403 if (pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_get_job_cccb_fence()
412 if (WARN_ON(queue->cccb_fence_ctx.job)) in pvr_queue_get_job_cccb_fence()
413 pvr_job_put(queue->cccb_fence_ctx.job); in pvr_queue_get_job_cccb_fence()
415 queue->cccb_fence_ctx.job = pvr_job_get(job); in pvr_queue_get_job_cccb_fence()
419 if (!WARN_ON(cccb_fence->queue)) in pvr_queue_get_job_cccb_fence()
420 pvr_queue_cccb_fence_init(job->cccb_fence, queue); in pvr_queue_get_job_cccb_fence()
423 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_get_job_cccb_fence()
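
Per the matches in pvr_queue_get_job_cccb_fence(), the CCCB fence only materializes when pvr_cccb_cmdseq_fits() says the job's command sequence does not fit: the job is stashed in cccb_fence_ctx.job under job_lock, and the returned fence blocks scheduling until pvr_queue_check_job_waiting_for_cccb_space() later re-checks and signals it. A rough single-threaded model of that stash-or-pass decision; the cccb_free counter and function names are invented for illustration, and locking is omitted:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct job { size_t cmds_size; };

    struct cccb_fence_ctx {
        struct job *job;        /* job waiting for CCCB space, if any */
    };

    struct queue {
        size_t cccb_free;       /* stand-in for pvr_cccb_cmdseq_fits() */
        struct cccb_fence_ctx cccb_fence_ctx;
    };

    /* Returns true when the job must wait: it is stashed and will be
     * re-checked (and its fence signalled) once CCCB space is reclaimed,
     * as pvr_queue_check_job_waiting_for_cccb_space() does. */
    static bool job_needs_cccb_fence(struct queue *q, struct job *job)
    {
        if (job->cmds_size <= q->cccb_free)
            return false;               /* fits now, no internal dep */
        q->cccb_fence_ctx.job = job;    /* stash until space frees up */
        return true;
    }

    /* Called when the firmware consumes commands and space is reclaimed. */
    static void cccb_space_reclaimed(struct queue *q, size_t freed)
    {
        struct job *job = q->cccb_fence_ctx.job;

        q->cccb_free += freed;
        if (job && job->cmds_size <= q->cccb_free) {
            q->cccb_fence_ctx.job = NULL;
            printf("waiting job unblocked (fence would signal)\n");
        }
    }

    int main(void)
    {
        struct queue q = { .cccb_free = 16 };
        struct job big = { .cmds_size = 64 };

        if (job_needs_cccb_fence(&q, &big))
            printf("job stashed, waiting for CCCB space\n");
        cccb_space_reclaimed(&q, 64);
        return 0;
    }
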
430 * @queue: The queue this job will be submitted to.
441 pvr_queue_get_job_kccb_fence(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_job_kccb_fence() argument
443 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_get_job_kccb_fence()
461 pvr_queue_get_paired_frag_job_dep(struct pvr_queue *queue, struct pvr_job *job) in pvr_queue_get_paired_frag_job_dep() argument
483 return frag_job->base.sched->ops->prepare_job(&frag_job->base, &queue->entity); in pvr_queue_get_paired_frag_job_dep()
489 * @s_entity: The entity this job is queued on. in pvr_queue_prepare_job()
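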
499 struct pvr_queue *queue = container_of(s_entity, struct pvr_queue, entity); in pvr_queue_prepare_job() local
525 pvr_queue_job_fence_init(job->done_fence, queue); in pvr_queue_prepare_job()
531 internal_dep = pvr_queue_get_job_cccb_fence(queue, job); in pvr_queue_prepare_job()
533 /* KCCB fence is used to make sure we have a KCCB slot to queue our in pvr_queue_prepare_job()
537 internal_dep = pvr_queue_get_job_kccb_fence(queue, job); in pvr_queue_prepare_job()
543 * internal_dep = pvr_queue_get_job_xxxx_fence(queue, job); in pvr_queue_prepare_job()
548 internal_dep = pvr_queue_get_paired_frag_job_dep(queue, job); in pvr_queue_prepare_job()
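
drm_sched calls the prepare_job() hook repeatedly until it returns NULL, which is why pvr_queue_prepare_job() tries its internal dependencies in a fixed order (CCCB fence, KCCB fence, paired fragment job) and returns the first unmet one. A sketch of that first-unmet-dependency-wins shape; the dep getters below are stand-ins for the pvr_queue_get_job_*_fence() helpers:

    #include <stdio.h>

    struct dep { const char *name; };

    /* Stand-ins for pvr_queue_get_job_cccb_fence() and friends: each
     * returns NULL once its condition is already satisfied. */
    static struct dep cccb = { "cccb-space" }, kccb = { "kccb-slot" };
    static int cccb_ready, kccb_ready;

    static struct dep *get_cccb_dep(void) { return cccb_ready ? NULL : &cccb; }
    static struct dep *get_kccb_dep(void) { return kccb_ready ? NULL : &kccb; }

    /* Models pvr_queue_prepare_job(): hand the scheduler the first unmet
     * internal dependency; NULL means the job is ready to run. */
    static struct dep *prepare_job(void)
    {
        struct dep *d;

        if ((d = get_cccb_dep()))
            return d;
        if ((d = get_kccb_dep()))
            return d;
        return NULL;
    }

    int main(void)
    {
        struct dep *d;

        /* drm_sched keeps calling until NULL comes back. */
        while ((d = prepare_job())) {
            printf("waiting on %s\n", d->name);
            if (d == &cccb)
                cccb_ready = 1;
            else
                kccb_ready = 1;
        }
        printf("job ready\n");
        return 0;
    }
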
554 * pvr_queue_update_active_state_locked() - Update the queue active state.
555 * @queue: Queue to update the state on.
558 * pvr_device::queues::lock held. in pvr_queue_update_active_state_locked()
560 static void pvr_queue_update_active_state_locked(struct pvr_queue *queue) in pvr_queue_update_active_state_locked() argument
562 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state_locked()
566 /* The queue is temporarily out of any list when it's being reset, in pvr_queue_update_active_state_locked()
570 if (list_empty(&queue->node)) in pvr_queue_update_active_state_locked()
573 if (!atomic_read(&queue->in_flight_job_count)) in pvr_queue_update_active_state_locked()
574 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_update_active_state_locked()
576 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_update_active_state_locked()
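
pvr_queue_update_active_state_locked() reduces to two rules visible in the matches: a queue that sits on no list (list_empty(&queue->node)) is mid-reset and left alone; otherwise in_flight_job_count alone decides whether it belongs on pvr_dev->queues.idle or queues.active. A compact model with the two lists collapsed into a state enum (purely illustrative; the real code moves list nodes under queues.lock):

    #include <stdio.h>

    enum q_state { Q_UNLISTED, Q_IDLE, Q_ACTIVE };

    struct queue {
        int in_flight_job_count;
        enum q_state state;
    };

    /* Models pvr_queue_update_active_state_locked(): a queue being reset
     * sits on no list and is skipped; otherwise it lands on the idle or
     * active list purely from its in-flight job count. */
    static void update_active_state(struct queue *q)
    {
        if (q->state == Q_UNLISTED)     /* list_empty(&queue->node) */
            return;
        q->state = q->in_flight_job_count ? Q_ACTIVE : Q_IDLE;
    }

    int main(void)
    {
        struct queue q = { .state = Q_IDLE };

        q.in_flight_job_count = 1;
        update_active_state(&q);
        printf("state=%d (2=active)\n", q.state);

        q.in_flight_job_count = 0;
        update_active_state(&q);
        printf("state=%d (1=idle)\n", q.state);
        return 0;
    }
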
580 * pvr_queue_update_active_state() - Update the queue active state.
581 * @queue: Queue to update the state on.
585 * Updating the active state implies moving the queue in or out of the
586 * active queue list, which also defines whether the queue is checked
592 static void pvr_queue_update_active_state(struct pvr_queue *queue) in pvr_queue_update_active_state() argument
594 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_update_active_state()
597 pvr_queue_update_active_state_locked(queue); in pvr_queue_update_active_state()
603 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_submit_job_to_cccb() local
605 struct pvr_cccb *cccb = &queue->cccb; in pvr_queue_submit_job_to_cccb()
611 /* We need to add the queue to the active list before updating the CCCB, in pvr_queue_submit_job_to_cccb()
613 * happened on this queue. in pvr_queue_submit_job_to_cccb()
615 atomic_inc(&queue->in_flight_job_count); in pvr_queue_submit_job_to_cccb()
616 pvr_queue_update_active_state(queue); in pvr_queue_submit_job_to_cccb()
631 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
646 pvr_fw_object_get_fw_addr(jfence->queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
660 /* Reference value for the partial render test is the current queue fence in pvr_queue_submit_job_to_cccb()
663 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, in pvr_queue_submit_job_to_cccb()
673 pvr_fw_object_get_fw_addr(queue->timeline_ufo.fw_obj, &ufos[0].addr); in pvr_queue_submit_job_to_cccb()
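
In pvr_queue_submit_job_to_cccb(), native dependencies become firmware UFO wait commands: each waited-on job fence contributes its queue's timeline UFO firmware address plus a target sequence number, and the job's own completion is a UFO update on this queue's timeline object (the &ufos[0].addr match above). A sketch of building such (addr, value) pairs; the fence layout and the ">=" wait semantics here are simplifying assumptions, not the firmware ABI:

    #include <stdint.h>
    #include <stdio.h>

    struct ufo { uint32_t addr, value; };               /* fw addr + seqno */
    struct fence { uint32_t ufo_fw_addr, seqno; };      /* per-queue UFO */

    /* Models the UFO-wait construction in pvr_queue_submit_job_to_cccb():
     * each native dependency becomes "wait until the UFO at addr reaches
     * value" for the firmware (">=" is assumed here for illustration). */
    static int build_ufo_waits(const struct fence *deps, int ndeps,
                               struct ufo *ufos)
    {
        int n = 0;

        for (int i = 0; i < ndeps; i++) {
            ufos[n].addr = deps[i].ufo_fw_addr;
            ufos[n].value = deps[i].seqno;
            n++;
        }
        return n;
    }

    int main(void)
    {
        struct fence deps[] = { { 0x1000, 3 }, { 0x2000, 7 } };
        struct ufo ufos[2];
        int n = build_ufo_waits(deps, 2, ufos);

        for (int i = 0; i < n; i++)
            printf("wait: *0x%x >= %u\n", (unsigned)ufos[i].addr,
                   (unsigned)ufos[i].value);
        return 0;
    }
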
745 struct pvr_queue *queue = container_of(job->base.sched, in pvr_queue_run_job() local
748 pvr_cccb_send_kccb_kick(pvr_dev, &queue->cccb, in pvr_queue_run_job()
749 pvr_context_get_fw_addr(job->ctx) + queue->ctx_offset, in pvr_queue_run_job()
756 static void pvr_queue_stop(struct pvr_queue *queue, struct pvr_job *bad_job) in pvr_queue_stop() argument
758 drm_sched_stop(&queue->scheduler, bad_job ? &bad_job->base : NULL); in pvr_queue_stop()
761 static void pvr_queue_start(struct pvr_queue *queue) in pvr_queue_start() argument
768 *queue->timeline_ufo.value = atomic_read(&queue->job_fence_ctx.seqno); in pvr_queue_start()
770 list_for_each_entry(job, &queue->scheduler.pending_list, base.list) { in pvr_queue_start()
781 atomic_set(&queue->ctx->faulty, 1); in pvr_queue_start()
785 drm_sched_start(&queue->scheduler); in pvr_queue_start()
802 struct pvr_queue *queue = container_of(sched, struct pvr_queue, scheduler); in pvr_queue_timedout_job() local
803 struct pvr_device *pvr_dev = queue->ctx->pvr_dev; in pvr_queue_timedout_job()
809 /* Before we stop the scheduler, make sure the queue is out of any list, so in pvr_queue_timedout_job()
812 * queue in the active list. This would cause in pvr_queue_timedout_job()
820 list_del_init(&queue->node); in pvr_queue_timedout_job()
830 WARN_ON(atomic_read(&queue->in_flight_job_count) != job_count); in pvr_queue_timedout_job()
832 /* Re-insert the queue in the proper list, and kick a queue processing in pvr_queue_timedout_job()
837 list_move_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_timedout_job()
839 atomic_set(&queue->in_flight_job_count, job_count); in pvr_queue_timedout_job()
840 list_move_tail(&queue->node, &pvr_dev->queues.active); in pvr_queue_timedout_job()
841 pvr_queue_process(queue); in pvr_queue_timedout_job()
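
pvr_queue_timedout_job() first unlinks the queue under queues.lock (the list_del_init() match) so a concurrent pvr_queue_process() cannot operate on it mid-reset, then recounts still-unsignaled jobs on the pending list and re-inserts the queue on the idle or active list to match. A model of that recount-and-relist step, with the lists again collapsed to an enum and locking omitted:

    #include <stdbool.h>
    #include <stdio.h>

    enum q_state { Q_UNLISTED, Q_IDLE, Q_ACTIVE };

    struct job { bool done; };

    struct queue {
        enum q_state state;
        int in_flight_job_count;
    };

    /* Models the tail of pvr_queue_timedout_job(): with the queue
     * unlisted (so nothing else processes it), count jobs whose done
     * fence hasn't signalled, then relist the queue accordingly. */
    static void recover_after_timeout(struct queue *q,
                                      const struct job *pending, int count)
    {
        int job_count = 0;

        q->state = Q_UNLISTED;              /* list_del_init() */
        for (int i = 0; i < count; i++)
            if (!pending[i].done)
                job_count++;

        q->in_flight_job_count = job_count; /* atomic_set() */
        q->state = job_count ? Q_ACTIVE : Q_IDLE;
    }

    int main(void)
    {
        struct job pending[] = { { true }, { false }, { false } };
        struct queue q = { .state = Q_ACTIVE };

        recover_after_timeout(&q, pending, 3);
        printf("in-flight=%d state=%d (2=active)\n",
               q.in_flight_job_count, q.state);
        return 0;
    }
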
875 * pvr_job::done_fence objects are backed by the timeline UFO attached to the queue
896 * @queue: Queue to check.
899 * the UFO object attached to the queue.
902 pvr_queue_signal_done_fences(struct pvr_queue *queue) in pvr_queue_signal_done_fences() argument
907 spin_lock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
908 cur_seqno = *queue->timeline_ufo.value; in pvr_queue_signal_done_fences()
909 list_for_each_entry_safe(job, tmp_job, &queue->scheduler.pending_list, base.list) { in pvr_queue_signal_done_fences()
916 atomic_dec(&queue->in_flight_job_count); in pvr_queue_signal_done_fences()
919 spin_unlock(&queue->scheduler.job_list_lock); in pvr_queue_signal_done_fences()
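
pvr_queue_signal_done_fences() snapshots the timeline UFO value under job_list_lock and signals the done fence of every pending job whose sequence number the firmware has already passed, decrementing in_flight_job_count as it goes. A userspace model of that seqno sweep over an array, counting what is still in flight (the real code walks the scheduler's pending list and must compare sequence numbers wrap-safely):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct job {
        uint32_t seqno;
        bool done;      /* stands in for dma_fence_signal(done_fence) */
    };

    /* Models pvr_queue_signal_done_fences(): every pending job whose
     * seqno is at or below the UFO snapshot has completed on the
     * firmware side. */
    static int signal_done_fences(struct job *jobs, int count, uint32_t ufo)
    {
        int in_flight = 0;

        for (int i = 0; i < count; i++) {
            if (jobs[i].done)
                continue;
            if (jobs[i].seqno <= ufo)
                jobs[i].done = true;    /* signal the done fence */
            else
                in_flight++;            /* still running on the GPU */
        }
        return in_flight;
    }

    int main(void)
    {
        struct job jobs[] = { { 1, false }, { 2, false }, { 3, false } };
        uint32_t ufo = 2;   /* snapshot of *queue->timeline_ufo.value */

        printf("in-flight after sweep: %d\n",
               signal_done_fences(jobs, 3, ufo));   /* prints 1 */
        return 0;
    }
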
926 * @queue: Queue to check
932 pvr_queue_check_job_waiting_for_cccb_space(struct pvr_queue *queue) in pvr_queue_check_job_waiting_for_cccb_space() argument
938 mutex_lock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
939 job = queue->cccb_fence_ctx.job; in pvr_queue_check_job_waiting_for_cccb_space()
953 if (WARN_ON(!cccb_fence->queue)) { in pvr_queue_check_job_waiting_for_cccb_space()
963 if (!pvr_cccb_cmdseq_fits(&queue->cccb, job_cmds_size(job, native_deps_remaining))) { in pvr_queue_check_job_waiting_for_cccb_space()
971 queue->cccb_fence_ctx.job = NULL; in pvr_queue_check_job_waiting_for_cccb_space()
974 mutex_unlock(&queue->cccb_fence_ctx.job_lock); in pvr_queue_check_job_waiting_for_cccb_space()
980 * pvr_queue_process() - Process events that happened on a queue.
981 * @queue: Queue to check
985 void pvr_queue_process(struct pvr_queue *queue) in pvr_queue_process() argument
987 lockdep_assert_held(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_process()
989 pvr_queue_check_job_waiting_for_cccb_space(queue); in pvr_queue_process()
990 pvr_queue_signal_done_fences(queue); in pvr_queue_process()
991 pvr_queue_update_active_state_locked(queue); in pvr_queue_process()
994 static u32 get_dm_type(struct pvr_queue *queue) in get_dm_type() argument
996 switch (queue->type) { in get_dm_type()
1010 * init_fw_context() - Initializes the queue part of a FW context.
1011 * @queue: Queue object to initialize the FW context for.
1014 * FW contexts contain various states, one of them being a per-queue state in init_fw_context()
1015 * that needs to be initialized for each queue being exposed by a context. This
1018 static void init_fw_context(struct pvr_queue *queue, void *fw_ctx_map) in init_fw_context() argument
1020 struct pvr_context *ctx = queue->ctx; in init_fw_context()
1023 struct pvr_cccb *cccb = &queue->cccb; in init_fw_context()
1025 cctx_fw = fw_ctx_map + queue->ctx_offset; in init_fw_context()
1029 cctx_fw->dm = get_dm_type(queue); in init_fw_context()
1038 pvr_fw_object_get_fw_addr(queue->reg_state_obj, &cctx_fw->context_state_addr); in init_fw_context()
1043 * @queue: Queue on FW context to clean up.
1049 static int pvr_queue_cleanup_fw_context(struct pvr_queue *queue) in pvr_queue_cleanup_fw_context() argument
1051 if (!queue->ctx->fw_obj) in pvr_queue_cleanup_fw_context()
1054 return pvr_fw_structure_cleanup(queue->ctx->pvr_dev, in pvr_queue_cleanup_fw_context()
1056 queue->ctx->fw_obj, queue->ctx_offset); in pvr_queue_cleanup_fw_context()
1060 * pvr_queue_job_init() - Initialize queue related fields in a pvr_job object.
1063 * Bind the job to a queue and allocate memory to guarantee pvr_queue_job_arm()
1075 struct pvr_queue *queue; in pvr_queue_job_init() local
1081 queue = pvr_context_get_queue_for_job(job->ctx, job->type); in pvr_queue_job_init()
1082 if (!queue) in pvr_queue_job_init()
1085 if (!pvr_cccb_cmdseq_can_fit(&queue->cccb, job_cmds_size(job, min_native_dep_count))) in pvr_queue_job_init()
1088 err = drm_sched_job_init(&job->base, &queue->entity, 1, THIS_MODULE); in pvr_queue_job_init()
1140 * pvr_queue_job_push() - Push a job to its queue.
1144 * have been added to the job. This will effectively queue the job to
1145 * the drm_sched_entity attached to the queue. We grab a reference on
1151 struct pvr_queue *queue = container_of(job->base.sched, struct pvr_queue, scheduler); in pvr_queue_job_push() local
1154 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_job_push()
1155 queue->last_queued_job_scheduled_fence = dma_fence_get(&job->base.s_fence->scheduled); in pvr_queue_job_push()
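
The dma_fence_put()/dma_fence_get() pair in pvr_queue_job_push() is a reference swap: the queue always pins exactly one "last queued job scheduled" fence, releasing the previous one as the new one arrives, with pvr_queue_kill() dropping the final reference. A minimal refcounted model of that put-then-get swap; fence_get()/fence_put() are simplified stand-ins for the dma_fence helpers:

    #include <assert.h>
    #include <stdlib.h>

    struct fence { int refcount; };

    static struct fence *fence_get(struct fence *f)
    {
        if (f)
            f->refcount++;
        return f;
    }

    static void fence_put(struct fence *f)
    {
        if (f && --f->refcount == 0)
            free(f);
    }

    struct queue { struct fence *last_scheduled; };

    /* Models the swap in pvr_queue_job_push(): drop the old fence's
     * reference, then pin the new one. The put-before-get order is safe
     * here because the queue never holds the only reference to a fence
     * that is still in use elsewhere. */
    static void push_job(struct queue *q, struct fence *scheduled)
    {
        fence_put(q->last_scheduled);
        q->last_scheduled = fence_get(scheduled);
    }

    int main(void)
    {
        struct queue q = { 0 };
        struct fence *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

        a->refcount = b->refcount = 1;      /* each job's own reference */
        push_job(&q, a);
        assert(a->refcount == 2);
        push_job(&q, b);                    /* queue drops a, pins b */
        assert(a->refcount == 1 && b->refcount == 2);

        fence_put(q.last_scheduled);        /* what pvr_queue_kill() does */
        fence_put(a);
        fence_put(b);
        return 0;
    }
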
1163 struct pvr_queue *queue = priv; in reg_state_init() local
1165 if (queue->type == DRM_PVR_JOB_TYPE_GEOMETRY) { in reg_state_init()
1169 queue->callstack_addr; in reg_state_init()
1174 * pvr_queue_create() - Create a queue object.
1175 * @ctx: The context this queue will be attached to.
1176 * @type: The type of jobs being pushed to this queue.
1180 * Create a queue object that will be used to queue and track jobs.
1214 struct pvr_queue *queue; in pvr_queue_create() local
1243 queue = kzalloc(sizeof(*queue), GFP_KERNEL); in pvr_queue_create()
1244 if (!queue) in pvr_queue_create()
1247 queue->type = type; in pvr_queue_create()
1248 queue->ctx_offset = get_ctx_offset(type); in pvr_queue_create()
1249 queue->ctx = ctx; in pvr_queue_create()
1250 queue->callstack_addr = args->callstack_addr; in pvr_queue_create()
1251 sched = &queue->scheduler; in pvr_queue_create()
1252 INIT_LIST_HEAD(&queue->node); in pvr_queue_create()
1253 mutex_init(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1254 pvr_queue_fence_ctx_init(&queue->cccb_fence_ctx.base); in pvr_queue_create()
1255 pvr_queue_fence_ctx_init(&queue->job_fence_ctx); in pvr_queue_create()
1257 err = pvr_cccb_init(pvr_dev, &queue->cccb, props[type].cccb_size, props[type].name); in pvr_queue_create()
1263 reg_state_init, queue, &queue->reg_state_obj); in pvr_queue_create()
1267 init_fw_context(queue, fw_ctx_map); in pvr_queue_create()
1275 cpu_map = pvr_fw_object_create_and_map(pvr_dev, sizeof(*queue->timeline_ufo.value), in pvr_queue_create()
1277 NULL, NULL, &queue->timeline_ufo.fw_obj); in pvr_queue_create()
1283 queue->timeline_ufo.value = cpu_map; in pvr_queue_create()
1285 err = drm_sched_init(&queue->scheduler, in pvr_queue_create()
1289 pvr_dev->sched_wq, NULL, "pvr-queue", in pvr_queue_create()
1294 err = drm_sched_entity_init(&queue->entity, in pvr_queue_create()
1301 list_add_tail(&queue->node, &pvr_dev->queues.idle); in pvr_queue_create()
1304 return queue; in pvr_queue_create()
1307 drm_sched_fini(&queue->scheduler); in pvr_queue_create()
1310 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_create()
1313 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_create()
1316 pvr_cccb_fini(&queue->cccb); in pvr_queue_create()
1319 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_create()
1320 kfree(queue); in pvr_queue_create()
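
The labels at the tail of pvr_queue_create() (the drm_sched_fini / pvr_fw_object_unmap_and_destroy / pvr_cccb_fini / mutex_destroy matches above) form the standard kernel goto-unwind ladder: each init step that can fail jumps to the label that tears down everything initialized before it, in reverse order. A standalone illustration of the shape, with invented resources standing in for the CCCB and timeline UFO objects:

    #include <stdio.h>
    #include <stdlib.h>

    struct object { void *cccb, *ufo; };    /* illustrative resources */

    /* Models the error ladder at the end of pvr_queue_create(): a failing
     * step jumps to the label that unwinds only what already succeeded. */
    static struct object *create_object(int fail_at)
    {
        struct object *obj = calloc(1, sizeof(*obj));

        if (!obj)
            return NULL;

        obj->cccb = (fail_at == 1) ? NULL : malloc(16); /* step 1: CCCB */
        if (!obj->cccb)
            goto err_free_obj;

        obj->ufo = (fail_at == 2) ? NULL : malloc(16);  /* step 2: UFO */
        if (!obj->ufo)
            goto err_cccb_fini;

        return obj;                         /* success: everything stays */

    err_cccb_fini:
        free(obj->cccb);                    /* undo step 1 */
    err_free_obj:
        free(obj);
        return NULL;
    }

    int main(void)
    {
        struct object *obj = create_object(2);

        printf("fail at step 2 -> %s\n", obj ? "created" : "unwound");
        obj = create_object(0);
        printf("no failure     -> %s\n", obj ? "created" : "unwound");
        if (obj) {
            free(obj->ufo);
            free(obj->cccb);
            free(obj);
        }
        return 0;
    }
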
1327 struct pvr_queue *queue; in pvr_queue_device_pre_reset() local
1330 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_pre_reset()
1331 pvr_queue_stop(queue, NULL); in pvr_queue_device_pre_reset()
1332 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_pre_reset()
1333 pvr_queue_stop(queue, NULL); in pvr_queue_device_pre_reset()
1339 struct pvr_queue *queue; in pvr_queue_device_post_reset() local
1342 list_for_each_entry(queue, &pvr_dev->queues.active, node) in pvr_queue_device_post_reset()
1343 pvr_queue_start(queue); in pvr_queue_device_post_reset()
1344 list_for_each_entry(queue, &pvr_dev->queues.idle, node) in pvr_queue_device_post_reset()
1345 pvr_queue_start(queue); in pvr_queue_device_post_reset()
1350 * pvr_queue_kill() - Kill a queue.
1351 * @queue: The queue to kill.
1353 * Kill the queue so no new jobs can be pushed. Should be called when the
1354 * context handle is destroyed. The queue object might last longer if jobs
1355 * are still in flight and holding a reference to the context this queue
1358 void pvr_queue_kill(struct pvr_queue *queue) in pvr_queue_kill() argument
1360 drm_sched_entity_destroy(&queue->entity); in pvr_queue_kill()
1361 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_kill()
1362 queue->last_queued_job_scheduled_fence = NULL; in pvr_queue_kill()
1366 * pvr_queue_destroy() - Destroy a queue.
1367 * @queue: The queue to destroy.
1369 * Clean up the queue and free the resources attached to it. Should be in pvr_queue_destroy()
1372 void pvr_queue_destroy(struct pvr_queue *queue) in pvr_queue_destroy() argument
1374 if (!queue) in pvr_queue_destroy()
1377 mutex_lock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1378 list_del_init(&queue->node); in pvr_queue_destroy()
1379 mutex_unlock(&queue->ctx->pvr_dev->queues.lock); in pvr_queue_destroy()
1381 drm_sched_fini(&queue->scheduler); in pvr_queue_destroy()
1382 drm_sched_entity_fini(&queue->entity); in pvr_queue_destroy()
1384 if (WARN_ON(queue->last_queued_job_scheduled_fence)) in pvr_queue_destroy()
1385 dma_fence_put(queue->last_queued_job_scheduled_fence); in pvr_queue_destroy()
1387 pvr_queue_cleanup_fw_context(queue); in pvr_queue_destroy()
1389 pvr_fw_object_unmap_and_destroy(queue->timeline_ufo.fw_obj); in pvr_queue_destroy()
1390 pvr_fw_object_destroy(queue->reg_state_obj); in pvr_queue_destroy()
1391 pvr_cccb_fini(&queue->cccb); in pvr_queue_destroy()
1392 mutex_destroy(&queue->cccb_fence_ctx.job_lock); in pvr_queue_destroy()
1393 kfree(queue); in pvr_queue_destroy()
1397 * pvr_queue_device_init() - Device-level initialization of queue related fields.
1400 * Initializes all fields related to queue management in pvr_device.
1424 * pvr_queue_device_fini() - Device-level cleanup of queue related fields.
1427 * Clean up and free all queue-related resources attached to a pvr_device object.