Lines matching "1" and "q" in drivers/gpu/drm/xe/xe_exec_queue.c

29 	XE_EXEC_QUEUE_TIMESLICE = 1,
34 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q,
37 static void __xe_exec_queue_free(struct xe_exec_queue *q) in __xe_exec_queue_free() argument
39 if (q->vm) in __xe_exec_queue_free()
40 xe_vm_put(q->vm); in __xe_exec_queue_free()
42 if (q->xef) in __xe_exec_queue_free()
43 xe_file_put(q->xef); in __xe_exec_queue_free()
45 kfree(q); in __xe_exec_queue_free()
54 struct xe_exec_queue *q; in __xe_exec_queue_alloc() local
61 q = kzalloc(struct_size(q, lrc, width), GFP_KERNEL); in __xe_exec_queue_alloc()
62 if (!q) in __xe_exec_queue_alloc()
65 kref_init(&q->refcount); in __xe_exec_queue_alloc()
66 q->flags = flags; in __xe_exec_queue_alloc()
67 q->hwe = hwe; in __xe_exec_queue_alloc()
68 q->gt = gt; in __xe_exec_queue_alloc()
69 q->class = hwe->class; in __xe_exec_queue_alloc()
70 q->width = width; in __xe_exec_queue_alloc()
71 q->logical_mask = logical_mask; in __xe_exec_queue_alloc()
72 q->fence_irq = &gt->fence_irq[hwe->class]; in __xe_exec_queue_alloc()
73 q->ring_ops = gt->ring_ops[hwe->class]; in __xe_exec_queue_alloc()
74 q->ops = gt->exec_queue_ops; in __xe_exec_queue_alloc()
75 INIT_LIST_HEAD(&q->lr.link); in __xe_exec_queue_alloc()
76 INIT_LIST_HEAD(&q->multi_gt_link); in __xe_exec_queue_alloc()
77 INIT_LIST_HEAD(&q->hw_engine_group_link); in __xe_exec_queue_alloc()
79 q->sched_props.timeslice_us = hwe->eclass->sched_props.timeslice_us; in __xe_exec_queue_alloc()
80 q->sched_props.preempt_timeout_us = in __xe_exec_queue_alloc()
82 q->sched_props.job_timeout_ms = in __xe_exec_queue_alloc()
84 if (q->flags & EXEC_QUEUE_FLAG_KERNEL && in __xe_exec_queue_alloc()
85 q->flags & EXEC_QUEUE_FLAG_HIGH_PRIORITY) in __xe_exec_queue_alloc()
86 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_KERNEL; in __xe_exec_queue_alloc()
88 q->sched_props.priority = XE_EXEC_QUEUE_PRIORITY_NORMAL; in __xe_exec_queue_alloc()
91 q->vm = xe_vm_get(vm); in __xe_exec_queue_alloc()
95 * may set q->usm, must come before xe_lrc_create(), in __xe_exec_queue_alloc()
96 * may overwrite q->sched_props, must come before q->ops->init() in __xe_exec_queue_alloc()
98 err = exec_queue_user_extensions(xe, q, extensions, 0); in __xe_exec_queue_alloc()
100 __xe_exec_queue_free(q); in __xe_exec_queue_alloc()
105 return q; in __xe_exec_queue_alloc()
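
The queue and its per-lane LRC pointer array come from one allocation sized with struct_size() (line 61 above). Below is a minimal userspace sketch of the same flexible-array pattern; the names are hypothetical, and where struct_size() saturates on overflow so the allocation fails, this sketch checks and bails explicitly.

    #include <stdint.h>
    #include <stdlib.h>

    struct queue {
        int width;
        void *lrc[];            /* flexible array member, one slot per lane */
    };

    /* Mirrors kzalloc(struct_size(q, lrc, width)): one zeroed allocation
     * holding the header plus width trailing pointers. */
    static struct queue *queue_alloc(uint16_t width)
    {
        struct queue *q;

        if (width > (SIZE_MAX - sizeof(*q)) / sizeof(q->lrc[0]))
            return NULL;        /* struct_size() would saturate here */

        q = calloc(1, sizeof(*q) + width * sizeof(q->lrc[0]));
        if (q)
            q->width = width;
        return q;
    }
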
108 static int __xe_exec_queue_init(struct xe_exec_queue *q) in __xe_exec_queue_init() argument
110 struct xe_vm *vm = q->vm; in __xe_exec_queue_init()
119 for (i = 0; i < q->width; ++i) { in __xe_exec_queue_init()
120 q->lrc[i] = xe_lrc_create(q->hwe, q->vm, SZ_16K); in __xe_exec_queue_init()
121 if (IS_ERR(q->lrc[i])) { in __xe_exec_queue_init()
122 err = PTR_ERR(q->lrc[i]); in __xe_exec_queue_init()
130 err = q->ops->init(q); in __xe_exec_queue_init()
140 for (i = i - 1; i >= 0; --i) in __xe_exec_queue_init()
141 xe_lrc_put(q->lrc[i]); in __xe_exec_queue_init()
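
The error path at line 140 releases only the LRCs created before the failure, walking backwards from i - 1. A self-contained sketch of that unwind idiom, with toy stand-ins (res_create/res_destroy are hypothetical):

    #include <errno.h>
    #include <stdlib.h>

    struct res { int dummy; };

    static struct res *res_create(void) { return calloc(1, sizeof(struct res)); }
    static void res_destroy(struct res *r) { free(r); }

    /* On failure at index i, release only the 0..i-1 entries that were
     * actually created, in reverse order. */
    static int init_all(struct res *r[], int n)
    {
        int i, err = 0;

        for (i = 0; i < n; ++i) {
            r[i] = res_create();
            if (!r[i]) {
                err = -ENOMEM;
                goto err_unwind;
            }
        }
        return 0;

    err_unwind:
        for (i = i - 1; i >= 0; --i)
            res_destroy(r[i]);
        return err;
    }
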
150 struct xe_exec_queue *q; in xe_exec_queue_create() local
153 q = __xe_exec_queue_alloc(xe, vm, logical_mask, width, hwe, flags, in xe_exec_queue_create()
155 if (IS_ERR(q)) in xe_exec_queue_create()
156 return q; in xe_exec_queue_create()
158 err = __xe_exec_queue_init(q); in xe_exec_queue_create()
162 return q; in xe_exec_queue_create()
165 __xe_exec_queue_free(q); in xe_exec_queue_create()
192 return xe_exec_queue_create(xe, vm, logical_mask, 1, hwe0, flags, extensions); in xe_exec_queue_create_class()
216 struct xe_exec_queue *q; in xe_exec_queue_create_bind() local
231 q = xe_exec_queue_create(xe, migrate_vm, in xe_exec_queue_create_bind()
232 BIT(hwe->logical_instance), 1, hwe, in xe_exec_queue_create_bind()
235 q = xe_exec_queue_create_class(xe, gt, migrate_vm, in xe_exec_queue_create_bind()
241 return q; in xe_exec_queue_create_bind()
246 struct xe_exec_queue *q = container_of(ref, struct xe_exec_queue, refcount); in xe_exec_queue_destroy() local
249 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_destroy()
250 if (!(q->flags & EXEC_QUEUE_FLAG_BIND_ENGINE_CHILD)) { in xe_exec_queue_destroy()
251 list_for_each_entry_safe(eq, next, &q->multi_gt_list, in xe_exec_queue_destroy()
256 q->ops->fini(q); in xe_exec_queue_destroy()
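
xe_exec_queue_destroy() is the kref release callback: container_of() recovers the queue from the embedded refcount at line 246. A userspace model of that pattern using a plain atomic counter (all names here are toy stand-ins, not the kernel's kref API):

    #include <stdatomic.h>
    #include <stddef.h>
    #include <stdlib.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct kref_model { atomic_int count; };

    struct queue_model {
        struct kref_model refcount;
        /* ... queue state ... */
    };

    /* Release callback: recover the enclosing object from the embedded
     * refcount, as container_of() does in the listing above. */
    static void queue_release(struct kref_model *ref)
    {
        struct queue_model *q = container_of(ref, struct queue_model, refcount);

        free(q);
    }

    static void queue_put(struct queue_model *q)
    {
        /* The last put runs the release callback, as kref_put() does. */
        if (atomic_fetch_sub(&q->refcount.count, 1) == 1)
            queue_release(&q->refcount);
    }
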
259 void xe_exec_queue_fini(struct xe_exec_queue *q) in xe_exec_queue_fini() argument
266 xe_exec_queue_update_run_ticks(q); in xe_exec_queue_fini()
268 for (i = 0; i < q->width; ++i) in xe_exec_queue_fini()
269 xe_lrc_put(q->lrc[i]); in xe_exec_queue_fini()
271 __xe_exec_queue_free(q); in xe_exec_queue_fini()
274 void xe_exec_queue_assign_name(struct xe_exec_queue *q, u32 instance) in xe_exec_queue_assign_name() argument
276 switch (q->class) { in xe_exec_queue_assign_name()
278 snprintf(q->name, sizeof(q->name), "rcs%d", instance); in xe_exec_queue_assign_name()
281 snprintf(q->name, sizeof(q->name), "vcs%d", instance); in xe_exec_queue_assign_name()
284 snprintf(q->name, sizeof(q->name), "vecs%d", instance); in xe_exec_queue_assign_name()
287 snprintf(q->name, sizeof(q->name), "bcs%d", instance); in xe_exec_queue_assign_name()
290 snprintf(q->name, sizeof(q->name), "ccs%d", instance); in xe_exec_queue_assign_name()
293 snprintf(q->name, sizeof(q->name), "gsccs%d", instance); in xe_exec_queue_assign_name()
296 XE_WARN_ON(q->class); in xe_exec_queue_assign_name()
302 struct xe_exec_queue *q; in xe_exec_queue_lookup() local
305 q = xa_load(&xef->exec_queue.xa, id); in xe_exec_queue_lookup()
306 if (q) in xe_exec_queue_lookup()
307 xe_exec_queue_get(q); in xe_exec_queue_lookup()
310 return q; in xe_exec_queue_lookup()
320 static int exec_queue_set_priority(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_priority() argument
329 q->sched_props.priority = value; in exec_queue_set_priority()
385 static int exec_queue_set_timeslice(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_set_timeslice() argument
390 xe_exec_queue_get_prop_minmax(q->hwe->eclass, in exec_queue_set_timeslice()
397 q->sched_props.timeslice_us = value; in exec_queue_set_timeslice()
402 struct xe_exec_queue *q,
411 struct xe_exec_queue *q, in exec_queue_user_ext_set_property() argument
434 return exec_queue_set_property_funcs[idx](xe, q, ext.value); in exec_queue_user_ext_set_property()
438 struct xe_exec_queue *q,
446 static int exec_queue_user_extensions(struct xe_device *xe, struct xe_exec_queue *q, in exec_queue_user_extensions() argument
468 err = exec_queue_user_extension_funcs[idx](xe, q, extensions); in exec_queue_user_extensions()
473 return exec_queue_user_extensions(xe, q, ext.next_extension, in exec_queue_user_extensions()
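
exec_queue_user_extensions() walks a linked list of extension nodes in user memory, recursing through next_extension until it reaches 0 (the kernel caps the chain length). A hedged userspace sketch of building such a chain, assuming the uapi in include/uapi/drm/xe_drm.h; the property values shown are illustrative:

    #include <stdint.h>
    #include <drm/xe_drm.h>

    /* Builds a two-node extension chain; the kernel walks
     * base.next_extension until it hits 0. */
    static uint64_t build_exec_queue_extensions(struct drm_xe_ext_set_property ext[2])
    {
        ext[0] = (struct drm_xe_ext_set_property){
            .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
            .base.next_extension = (uintptr_t)&ext[1],
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_PRIORITY,
            .value = 1,          /* assumed: normal priority */
        };
        ext[1] = (struct drm_xe_ext_set_property){
            .base.name = DRM_XE_EXEC_QUEUE_EXTENSION_SET_PROPERTY,
            .property = DRM_XE_EXEC_QUEUE_SET_PROPERTY_TIMESLICE,
            .value = 1000,       /* microseconds; clamped to the class min/max */
        };
        return (uintptr_t)&ext[0];
    }
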
490 len > 1)) in calc_validate_logical_mask()
515 if (width == 1 || !i) in calc_validate_logical_mask()
521 if (i && XE_IOCTL_DBG(xe, current_mask != prev_mask << 1)) in calc_validate_logical_mask()
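
The check at line 521 requires that, for a parallel queue, each lane's logical mask be the previous lane's mask shifted left by one, so a placement occupies adjacent logical engine instances. A standalone model of that validation:

    #include <stdbool.h>
    #include <stdint.h>

    /* Models the current_mask != prev_mask << 1 check above. */
    static bool placement_masks_valid(const uint32_t *mask, int width)
    {
        for (int i = 1; i < width; ++i)
            if (mask[i] != mask[i - 1] << 1)
                return false;
        return true;
    }
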
543 struct xe_exec_queue *q = NULL; in xe_exec_queue_create_ioctl() local
550 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_create_ioctl()
567 if (XE_IOCTL_DBG(xe, args->width != 1) || in xe_exec_queue_create_ioctl()
568 XE_IOCTL_DBG(xe, args->num_placements != 1) || in xe_exec_queue_create_ioctl()
583 if (q) in xe_exec_queue_create_ioctl()
588 q = new; in xe_exec_queue_create_ioctl()
591 &q->multi_gt_link); in xe_exec_queue_create_ioctl()
621 q = xe_exec_queue_create(xe, vm, logical_mask, in xe_exec_queue_create_ioctl()
626 if (IS_ERR(q)) in xe_exec_queue_create_ioctl()
627 return PTR_ERR(q); in xe_exec_queue_create_ioctl()
630 q->lr.context = dma_fence_context_alloc(1); in xe_exec_queue_create_ioctl()
632 err = xe_vm_add_compute_exec_queue(vm, q); in xe_exec_queue_create_ioctl()
637 if (q->vm && q->hwe->hw_engine_group) { in xe_exec_queue_create_ioctl()
638 err = xe_hw_engine_group_add_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_create_ioctl()
644 q->xef = xe_file_get(xef); in xe_exec_queue_create_ioctl()
647 err = xa_alloc(&xef->exec_queue.xa, &id, q, xa_limit_32b, GFP_KERNEL); in xe_exec_queue_create_ioctl()
656 xe_exec_queue_kill(q); in xe_exec_queue_create_ioctl()
658 xe_exec_queue_put(q); in xe_exec_queue_create_ioctl()
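
From userspace, this ioctl is reached via DRM_IOCTL_XE_EXEC_QUEUE_CREATE. A minimal sketch, assuming the kernel uapi headers; fd is an open Xe render node and vm_id comes from a prior DRM_IOCTL_XE_VM_CREATE:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/xe_drm.h>

    /* Creates a single-width render exec queue on gt 0; on success the
     * kernel writes the new id into create.exec_queue_id. */
    static int exec_queue_create_simple(int fd, uint32_t vm_id, uint32_t *id)
    {
        struct drm_xe_engine_class_instance instance = {
            .engine_class = DRM_XE_ENGINE_CLASS_RENDER,
            .engine_instance = 0,
            .gt_id = 0,
        };
        struct drm_xe_exec_queue_create create = {
            .width = 1,
            .num_placements = 1,
            .vm_id = vm_id,
            .instances = (uintptr_t)&instance,
        };
        int ret = ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_CREATE, &create);

        if (ret)
            return ret;
        *id = create.exec_queue_id;
        return 0;
    }
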
668 struct xe_exec_queue *q; in xe_exec_queue_get_property_ioctl() local
671 if (XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_get_property_ioctl()
674 q = xe_exec_queue_lookup(xef, args->exec_queue_id); in xe_exec_queue_get_property_ioctl()
675 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_get_property_ioctl()
680 args->value = q->ops->reset_status(q); in xe_exec_queue_get_property_ioctl()
687 xe_exec_queue_put(q); in xe_exec_queue_get_property_ioctl()
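
The matching userspace side, reusing the headers from the create sketch above; the only property exposed here is the ban status reported by reset_status() at line 680:

    /* Returns 1 if the queue was banned after hangs/resets, 0 if not,
     * -1 on ioctl failure (errno identifies the cause). */
    static int exec_queue_banned(int fd, uint32_t exec_queue_id)
    {
        struct drm_xe_exec_queue_get_property prop = {
            .exec_queue_id = exec_queue_id,
            .property = DRM_XE_EXEC_QUEUE_GET_PROPERTY_BAN,
        };

        if (ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_GET_PROPERTY, &prop))
            return -1;
        return prop.value != 0;
    }
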
694 * @q: The exec_queue
698 bool xe_exec_queue_is_lr(struct xe_exec_queue *q) in xe_exec_queue_is_lr() argument
700 return q->vm && xe_vm_in_lr_mode(q->vm) && in xe_exec_queue_is_lr()
701 !(q->flags & EXEC_QUEUE_FLAG_VM); in xe_exec_queue_is_lr()
704 static s32 xe_exec_queue_num_job_inflight(struct xe_exec_queue *q) in xe_exec_queue_num_job_inflight() argument
706 return q->lrc[0]->fence_ctx.next_seqno - xe_lrc_seqno(q->lrc[0]) - 1; in xe_exec_queue_num_job_inflight()
711 * @q: The exec_queue
715 bool xe_exec_queue_ring_full(struct xe_exec_queue *q) in xe_exec_queue_ring_full() argument
717 struct xe_lrc *lrc = q->lrc[0]; in xe_exec_queue_ring_full()
720 return xe_exec_queue_num_job_inflight(q) >= max_job; in xe_exec_queue_ring_full()
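
The occupancy arithmetic above works purely on sequence numbers: the last emitted seqno is next_seqno - 1, so the inflight count is next_seqno - completed - 1. A userspace model of that bookkeeping (struct and field names are hypothetical; in the elided lines above, max_job is derived from the LRC ring size):

    #include <stdbool.h>
    #include <stdint.h>

    struct lrc_model {
        int32_t next_seqno;   /* seqno the next job will be assigned */
        int32_t hw_seqno;     /* last seqno the hardware completed */
    };

    static int32_t jobs_inflight(const struct lrc_model *lrc)
    {
        /* last emitted seqno is next_seqno - 1 */
        return lrc->next_seqno - lrc->hw_seqno - 1;
    }

    static bool ring_full(const struct lrc_model *lrc, int32_t max_job)
    {
        return jobs_inflight(lrc) >= max_job;
    }
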
725 * @q: The exec_queue
737 bool xe_exec_queue_is_idle(struct xe_exec_queue *q) in xe_exec_queue_is_idle() argument
739 if (xe_exec_queue_is_parallel(q)) { in xe_exec_queue_is_idle()
742 for (i = 0; i < q->width; ++i) { in xe_exec_queue_is_idle()
743 if (xe_lrc_seqno(q->lrc[i]) != in xe_exec_queue_is_idle()
744 q->lrc[i]->fence_ctx.next_seqno - 1) in xe_exec_queue_is_idle()
751 return xe_lrc_seqno(q->lrc[0]) == in xe_exec_queue_is_idle()
752 q->lrc[0]->fence_ctx.next_seqno - 1; in xe_exec_queue_is_idle()
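
Idle detection is the same arithmetic per lane: reusing struct lrc_model from the sketch above, a queue is idle once every lane has retired its most recently emitted job.

    /* Parallel case of the check above; width == 1 reduces to the
     * single-LRC comparison at lines 751-752. */
    static bool queue_is_idle(const struct lrc_model *lrc, int width)
    {
        for (int i = 0; i < width; ++i)
            if (lrc[i].hw_seqno != lrc[i].next_seqno - 1)
                return false;
        return true;
    }
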
758 * @q: The exec queue
763 void xe_exec_queue_update_run_ticks(struct xe_exec_queue *q) in xe_exec_queue_update_run_ticks() argument
774 if (!q->vm || !q->vm->xef) in xe_exec_queue_update_run_ticks()
777 xef = q->vm->xef; in xe_exec_queue_update_run_ticks()
787 lrc = q->lrc[0]; in xe_exec_queue_update_run_ticks()
789 xef->run_ticks[q->class] += (new_ts - old_ts) * q->width; in xe_exec_queue_update_run_ticks()
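
Line 789 charges the LRC timestamp delta once per parallel lane. A one-function model of that accounting, assuming 32-bit hardware timestamps so the unsigned subtraction stays wrap-safe:

    /* An N-wide queue accrues N times the sampled timestamp delta. */
    static void account_run_ticks(uint64_t *class_ticks, uint32_t old_ts,
                                  uint32_t new_ts, int width)
    {
        *class_ticks += (uint64_t)(new_ts - old_ts) * (uint64_t)width;
    }
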
794 * @q: The exec queue
801 void xe_exec_queue_kill(struct xe_exec_queue *q) in xe_exec_queue_kill() argument
803 struct xe_exec_queue *eq = q, *next; in xe_exec_queue_kill()
807 q->ops->kill(eq); in xe_exec_queue_kill()
808 xe_vm_remove_compute_exec_queue(q->vm, eq); in xe_exec_queue_kill()
811 q->ops->kill(q); in xe_exec_queue_kill()
812 xe_vm_remove_compute_exec_queue(q->vm, q); in xe_exec_queue_kill()
821 struct xe_exec_queue *q; in xe_exec_queue_destroy_ioctl() local
824 XE_IOCTL_DBG(xe, args->reserved[0] || args->reserved[1])) in xe_exec_queue_destroy_ioctl()
828 q = xa_erase(&xef->exec_queue.xa, args->exec_queue_id); in xe_exec_queue_destroy_ioctl()
830 if (XE_IOCTL_DBG(xe, !q)) in xe_exec_queue_destroy_ioctl()
833 if (q->vm && q->hwe->hw_engine_group) in xe_exec_queue_destroy_ioctl()
834 xe_hw_engine_group_del_exec_queue(q->hwe->hw_engine_group, q); in xe_exec_queue_destroy_ioctl()
836 xe_exec_queue_kill(q); in xe_exec_queue_destroy_ioctl()
838 trace_xe_exec_queue_close(q); in xe_exec_queue_destroy_ioctl()
839 xe_exec_queue_put(q); in xe_exec_queue_destroy_ioctl()
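
The userspace counterpart, again reusing the headers from the create sketch: destroying the id mapping lets the kernel kill outstanding work and drop its reference, as in xe_exec_queue_destroy_ioctl() above.

    static int exec_queue_destroy_simple(int fd, uint32_t exec_queue_id)
    {
        struct drm_xe_exec_queue_destroy destroy = {
            .exec_queue_id = exec_queue_id,
        };

        return ioctl(fd, DRM_IOCTL_XE_EXEC_QUEUE_DESTROY, &destroy);
    }
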
844 static void xe_exec_queue_last_fence_lockdep_assert(struct xe_exec_queue *q, in xe_exec_queue_last_fence_lockdep_assert() argument
847 if (q->flags & EXEC_QUEUE_FLAG_VM) { in xe_exec_queue_last_fence_lockdep_assert()
851 lockdep_assert_held(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_lockdep_assert()
857 * @q: The exec queue
860 void xe_exec_queue_last_fence_put(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_put() argument
862 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_put()
864 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_put()
869 * @q: The exec queue
873 void xe_exec_queue_last_fence_put_unlocked(struct xe_exec_queue *q) in xe_exec_queue_last_fence_put_unlocked() argument
875 if (q->last_fence) { in xe_exec_queue_last_fence_put_unlocked()
876 dma_fence_put(q->last_fence); in xe_exec_queue_last_fence_put_unlocked()
877 q->last_fence = NULL; in xe_exec_queue_last_fence_put_unlocked()
883 * @q: The exec queue
890 struct dma_fence *xe_exec_queue_last_fence_get(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get() argument
895 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_get()
897 if (q->last_fence && in xe_exec_queue_last_fence_get()
898 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get()
899 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_get()
901 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get()
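
The get path above drops the cached fence eagerly once it has signaled and hands out an always-signaled stub instead, so callers can take and wait on the result unconditionally. A toy single-threaded model of that pattern (names hypothetical, refcounting simplified):

    #include <stdbool.h>
    #include <stddef.h>

    struct fence_model { bool signaled; int refs; };

    static struct fence_model stub = { .signaled = true, .refs = 1 };

    static struct fence_model *fence_get(struct fence_model *f)
    {
        f->refs++;
        return f;
    }

    static struct fence_model *last_fence_get(struct fence_model **last)
    {
        if (*last && (*last)->signaled) {
            (*last)->refs--;      /* kernel: dma_fence_put() */
            *last = NULL;
        }
        return fence_get(*last ? *last : &stub);
    }
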
908 * @q: The exec queue
917 struct dma_fence *xe_exec_queue_last_fence_get_for_resume(struct xe_exec_queue *q, in xe_exec_queue_last_fence_get_for_resume() argument
922 lockdep_assert_held_write(&q->hwe->hw_engine_group->mode_sem); in xe_exec_queue_last_fence_get_for_resume()
924 if (q->last_fence && in xe_exec_queue_last_fence_get_for_resume()
925 test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &q->last_fence->flags)) in xe_exec_queue_last_fence_get_for_resume()
926 xe_exec_queue_last_fence_put_unlocked(q); in xe_exec_queue_last_fence_get_for_resume()
928 fence = q->last_fence ? q->last_fence : dma_fence_get_stub(); in xe_exec_queue_last_fence_get_for_resume()
935 * @q: The exec queue
942 void xe_exec_queue_last_fence_set(struct xe_exec_queue *q, struct xe_vm *vm, in xe_exec_queue_last_fence_set() argument
945 xe_exec_queue_last_fence_lockdep_assert(q, vm); in xe_exec_queue_last_fence_set()
947 xe_exec_queue_last_fence_put(q, vm); in xe_exec_queue_last_fence_set()
948 q->last_fence = dma_fence_get(fence); in xe_exec_queue_last_fence_set()
953 * @q: The exec queue
959 int xe_exec_queue_last_fence_test_dep(struct xe_exec_queue *q, struct xe_vm *vm) in xe_exec_queue_last_fence_test_dep() argument
964 fence = xe_exec_queue_last_fence_get(q, vm); in xe_exec_queue_last_fence_test_dep()