Lines Matching refs:group (cross-reference listing, apparently over the Xe driver's hw engine group code, drivers/gpu/drm/xe/xe_hw_engine_group.c; the leading number on each match is the line number in that file)
18 struct xe_hw_engine_group *group = arg; in hw_engine_group_free() local
20 destroy_workqueue(group->resume_wq); in hw_engine_group_free()
21 kfree(group); in hw_engine_group_free()
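These two matches cover the whole release path. A minimal reconstruction, assuming the (struct drm_device *, void *) signature that the drmm_add_action_or_reset() registration at line 68 requires:

static void hw_engine_group_free(struct drm_device *drm, void *arg)
{
        struct xe_hw_engine_group *group = arg;

        /* Stop the deferred-resume machinery before freeing its container */
        destroy_workqueue(group->resume_wq);
        kfree(group);
}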
28 struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work); in hw_engine_group_resume_lr_jobs_func() local
32 err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode); in hw_engine_group_resume_lr_jobs_func()
39 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { in hw_engine_group_resume_lr_jobs_func()
47 xe_hw_engine_group_put(group); in hw_engine_group_resume_lr_jobs_func()
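Lines 28-47 outline the deferred-resume worker: take the group in long-running (LR) mode, walk the exec queues, resume the faulting LR ones, then drop the lock reference. A plausible fill-in of the elided lines; the execution-mode enum name, the fault-mode filter, and the q->ops->resume() hook are assumptions mirrored from the suspend path at lines 218-249:

static void hw_engine_group_resume_lr_jobs_func(struct work_struct *w)
{
        struct xe_hw_engine_group *group = container_of(w, struct xe_hw_engine_group, resume_work);
        enum xe_hw_engine_group_execution_mode previous_mode;
        struct xe_exec_queue *q;
        int err;

        err = xe_hw_engine_group_get_mode(group, EXEC_MODE_LR, &previous_mode);
        if (err)
                return;

        if (previous_mode == EXEC_MODE_LR)
                goto put;       /* nothing was suspended, nothing to resume */

        list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
                if (!xe_vm_in_fault_mode(q->vm))        /* assumed filter */
                        continue;

                q->ops->resume(q);                      /* assumed resume hook */
        }

put:
        xe_hw_engine_group_put(group);  /* releases the read side of mode_sem */
}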
53 struct xe_hw_engine_group *group; in hw_engine_group_alloc() local
56 group = kzalloc(sizeof(*group), GFP_KERNEL); in hw_engine_group_alloc()
57 if (!group) in hw_engine_group_alloc()
60 group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0); in hw_engine_group_alloc()
61 if (!group->resume_wq) in hw_engine_group_alloc()
64 init_rwsem(&group->mode_sem); in hw_engine_group_alloc()
65 INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func); in hw_engine_group_alloc()
66 INIT_LIST_HEAD(&group->exec_queue_list); in hw_engine_group_alloc()
68 err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group); in hw_engine_group_alloc()
72 return group; in hw_engine_group_alloc()
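The constructor zero-allocates the group, creates a dedicated workqueue for the resume work, initialises the rwsem/work/list trio, and ties teardown to the DRM device lifetime so hw_engine_group_free() runs automatically. A sketch with the error paths filled in; the ERR_PTR returns and the kfree() on workqueue failure are assumptions:

static struct xe_hw_engine_group *hw_engine_group_alloc(struct xe_device *xe)
{
        struct xe_hw_engine_group *group;
        int err;

        group = kzalloc(sizeof(*group), GFP_KERNEL);
        if (!group)
                return ERR_PTR(-ENOMEM);

        group->resume_wq = alloc_workqueue("xe-resume-lr-jobs-wq", 0, 0);
        if (!group->resume_wq) {
                kfree(group);           /* assumed cleanup on this path */
                return ERR_PTR(-ENOMEM);
        }

        init_rwsem(&group->mode_sem);
        INIT_WORK(&group->resume_work, hw_engine_group_resume_lr_jobs_func);
        INIT_LIST_HEAD(&group->exec_queue_list);

        /* If registration fails, the action runs immediately and frees the group */
        err = drmm_add_action_or_reset(&xe->drm, hw_engine_group_free, group);
        if (err)
                return ERR_PTR(err);

        return group;
}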
147 int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q) in xe_hw_engine_group_add_exec_queue() argument
152 xe_assert(xe, group); in xe_hw_engine_group_add_exec_queue()
159 err = down_write_killable(&group->mode_sem); in xe_hw_engine_group_add_exec_queue()
163 if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) { in xe_hw_engine_group_add_exec_queue()
169 xe_hw_engine_group_resume_faulting_lr_jobs(group); in xe_hw_engine_group_add_exec_queue()
172 list_add(&q->hw_engine_group_link, &group->exec_queue_list); in xe_hw_engine_group_add_exec_queue()
173 up_write(&group->mode_sem); in xe_hw_engine_group_add_exec_queue()
178 up_write(&group->mode_sem); in xe_hw_engine_group_add_exec_queue()
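Adding a queue takes mode_sem for write so membership changes cannot race a mode switch. If a fault-mode (LR) queue joins while the group is running dma-fence jobs (line 163), the queue is suspended first and a deferred resume is scheduled (line 169). A sketch; the suspend calls between lines 163 and 169, the source of xe, and the error label are assumptions:

int xe_hw_engine_group_add_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
{
        struct xe_device *xe = gt_to_xe(q->gt); /* assumed source of xe */
        int err;

        xe_assert(xe, group);

        err = down_write_killable(&group->mode_sem);
        if (err)
                return err;

        if (xe_vm_in_fault_mode(q->vm) && group->cur_mode == EXEC_MODE_DMA_FENCE) {
                q->ops->suspend(q);             /* assumed suspend hook */
                err = q->ops->suspend_wait(q);  /* assumed */
                if (err)
                        goto err_unlock;

                xe_hw_engine_group_resume_faulting_lr_jobs(group);
        }

        list_add(&q->hw_engine_group_link, &group->exec_queue_list);
        up_write(&group->mode_sem);

        return 0;

err_unlock:
        up_write(&group->mode_sem);     /* line 178 */
        return err;
}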
187 void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q) in xe_hw_engine_group_del_exec_queue() argument
191 xe_assert(xe, group); in xe_hw_engine_group_del_exec_queue()
194 down_write(&group->mode_sem); in xe_hw_engine_group_del_exec_queue()
199 up_write(&group->mode_sem); in xe_hw_engine_group_del_exec_queue()
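Removal mirrors the add path under the same write lock. The list manipulation between lines 194 and 199 is elided by the listing; a sketch assuming a straightforward unlink:

void xe_hw_engine_group_del_exec_queue(struct xe_hw_engine_group *group, struct xe_exec_queue *q)
{
        struct xe_device *xe = gt_to_xe(q->gt); /* assumed, mirrors the add path */

        xe_assert(xe, group);

        down_write(&group->mode_sem);

        if (!list_empty(&q->hw_engine_group_link))      /* assumed guard */
                list_del(&q->hw_engine_group_link);

        up_write(&group->mode_sem);
}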
207 void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group) in xe_hw_engine_group_resume_faulting_lr_jobs() argument
209 queue_work(group->resume_wq, &group->resume_work); in xe_hw_engine_group_resume_faulting_lr_jobs()
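The resume entry point is a one-liner: it only queues resume_work on the group's dedicated workqueue. Deferring matters because its callers (the suspend path at line 244, the add path at line 169) hold mode_sem for write, while the worker re-takes it itself via xe_hw_engine_group_get_mode(). The complete function, as the listing shows it:

void xe_hw_engine_group_resume_faulting_lr_jobs(struct xe_hw_engine_group *group)
{
        queue_work(group->resume_wq, &group->resume_work);
}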
218 static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group) in xe_hw_engine_group_suspend_faulting_lr_jobs() argument
224 lockdep_assert_held_write(&group->mode_sem); in xe_hw_engine_group_suspend_faulting_lr_jobs()
226 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { in xe_hw_engine_group_suspend_faulting_lr_jobs()
234 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { in xe_hw_engine_group_suspend_faulting_lr_jobs()
244 xe_hw_engine_group_resume_faulting_lr_jobs(group); in xe_hw_engine_group_suspend_faulting_lr_jobs()
249 up_write(&group->mode_sem); in xe_hw_engine_group_suspend_faulting_lr_jobs()
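Suspension looks two-pass: kick off suspend on every faulting LR queue in the first list walk, then wait for each one in the second, so the waits overlap rather than serialise. On success a deferred resume is scheduled (line 244); on a failed wait the error path releases the write lock itself (line 249). A sketch; the suspend/suspend_wait hooks and the need_resume flag are assumptions:

static int xe_hw_engine_group_suspend_faulting_lr_jobs(struct xe_hw_engine_group *group)
{
        struct xe_exec_queue *q;
        bool need_resume = false;       /* assumed */
        int err;

        lockdep_assert_held_write(&group->mode_sem);

        list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
                if (!xe_vm_in_fault_mode(q->vm))
                        continue;

                need_resume = true;
                q->ops->suspend(q);             /* assumed: start the suspend */
        }

        list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
                if (!xe_vm_in_fault_mode(q->vm))
                        continue;

                err = q->ops->suspend_wait(q);  /* assumed: wait for completion */
                if (err)
                        goto err_suspend;
        }

        if (need_resume)
                xe_hw_engine_group_resume_faulting_lr_jobs(group);

        return 0;

err_suspend:
        up_write(&group->mode_sem);     /* line 249: drops the lock on failure */
        return err;
}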
263 static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group) in xe_hw_engine_group_wait_for_dma_fence_jobs() argument
269 lockdep_assert_held_write(&group->mode_sem); in xe_hw_engine_group_wait_for_dma_fence_jobs()
271 list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) { in xe_hw_engine_group_wait_for_dma_fence_jobs()
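The opposite transition drains in-flight dma-fence jobs. The per-queue wait body is elided by the listing; a sketch assuming each queue exposes its last fence (the helper name xe_exec_queue_last_fence_get_for_resume() and the LR-mode skip are assumptions):

static int xe_hw_engine_group_wait_for_dma_fence_jobs(struct xe_hw_engine_group *group)
{
        struct xe_exec_queue *q;
        struct dma_fence *fence;
        long timeout;

        lockdep_assert_held_write(&group->mode_sem);

        list_for_each_entry(q, &group->exec_queue_list, hw_engine_group_link) {
                if (xe_vm_in_lr_mode(q->vm))    /* assumed: skip LR queues */
                        continue;

                fence = xe_exec_queue_last_fence_get_for_resume(q, q->vm);
                timeout = dma_fence_wait(fence, false);
                dma_fence_put(fence);

                if (timeout < 0)
                        return -ETIME;
        }

        return 0;
}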
286 static int switch_mode(struct xe_hw_engine_group *group) in switch_mode() argument
291 lockdep_assert_held_write(&group->mode_sem); in switch_mode()
293 switch (group->cur_mode) { in switch_mode()
296 err = xe_hw_engine_group_suspend_faulting_lr_jobs(group); in switch_mode()
300 err = xe_hw_engine_group_wait_for_dma_fence_jobs(group); in switch_mode()
307 group->cur_mode = new_mode; in switch_mode()
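switch_mode() is the pivot between the two halves above: leaving LR mode suspends the faulting LR jobs (line 296), leaving dma-fence mode waits for the fence jobs (line 300), and only on success is cur_mode updated (line 307). A sketch; the new_mode bookkeeping is inferred:

static int switch_mode(struct xe_hw_engine_group *group)
{
        enum xe_hw_engine_group_execution_mode new_mode;
        int err = 0;

        lockdep_assert_held_write(&group->mode_sem);

        switch (group->cur_mode) {
        case EXEC_MODE_LR:
                new_mode = EXEC_MODE_DMA_FENCE;
                err = xe_hw_engine_group_suspend_faulting_lr_jobs(group);
                break;
        case EXEC_MODE_DMA_FENCE:
                new_mode = EXEC_MODE_LR;
                err = xe_hw_engine_group_wait_for_dma_fence_jobs(group);
                break;
        }

        if (err)
                return err;

        group->cur_mode = new_mode;

        return 0;
}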
320 int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group, in xe_hw_engine_group_get_mode() argument
323 __acquires(&group->mode_sem) in xe_hw_engine_group_get_mode()
325 int err = down_read_interruptible(&group->mode_sem); in xe_hw_engine_group_get_mode()
330 *previous_mode = group->cur_mode; in xe_hw_engine_group_get_mode()
332 if (new_mode != group->cur_mode) { in xe_hw_engine_group_get_mode()
333 up_read(&group->mode_sem); in xe_hw_engine_group_get_mode()
334 err = down_write_killable(&group->mode_sem); in xe_hw_engine_group_get_mode()
338 if (new_mode != group->cur_mode) { in xe_hw_engine_group_get_mode()
339 err = switch_mode(group); in xe_hw_engine_group_get_mode()
341 up_write(&group->mode_sem); in xe_hw_engine_group_get_mode()
345 downgrade_write(&group->mode_sem); in xe_hw_engine_group_get_mode()
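get_mode() is the core locking pattern of the file. Take mode_sem shared (line 325); if the group is already in the requested mode, return holding the read lock. Otherwise "upgrade" by dropping the read lock and re-acquiring for write (lines 333-334), re-check cur_mode because it may have changed in the unlocked window (line 338), switch if still needed, and downgrade back to a read lock (line 345) so the caller exits in shared mode either way. A sketch of how the listed lines plausibly fit together; the parameter types are assumed:

int xe_hw_engine_group_get_mode(struct xe_hw_engine_group *group,
                                enum xe_hw_engine_group_execution_mode new_mode,
                                enum xe_hw_engine_group_execution_mode *previous_mode)
__acquires(&group->mode_sem)
{
        int err = down_read_interruptible(&group->mode_sem);

        if (err)
                return err;

        *previous_mode = group->cur_mode;

        if (new_mode != group->cur_mode) {
                /* rwsems cannot be upgraded in place: drop, then retake for write */
                up_read(&group->mode_sem);
                err = down_write_killable(&group->mode_sem);
                if (err)
                        return err;

                /* another writer may have switched modes in the gap */
                if (new_mode != group->cur_mode) {
                        err = switch_mode(group);
                        if (err) {
                                up_write(&group->mode_sem);     /* line 341 */
                                return err;
                        }
                }
                downgrade_write(&group->mode_sem);
        }

        return 0;
}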
355 void xe_hw_engine_group_put(struct xe_hw_engine_group *group) in xe_hw_engine_group_put() argument
356 __releases(&group->mode_sem) in xe_hw_engine_group_put()
358 up_read(&group->mode_sem); in xe_hw_engine_group_put()
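put() just releases the shared lock taken by get_mode(); the __acquires/__releases sparse annotations document that the pair bracket a critical section. The function as listed, plus a hypothetical caller (submit_locked() is illustrative only, not from the source):

void xe_hw_engine_group_put(struct xe_hw_engine_group *group)
__releases(&group->mode_sem)
{
        up_read(&group->mode_sem);
}

/* Hypothetical usage: hold the group in dma-fence mode across a submission */
static int submit_locked(struct xe_hw_engine_group *group)
{
        enum xe_hw_engine_group_execution_mode prev;
        int err;

        err = xe_hw_engine_group_get_mode(group, EXEC_MODE_DMA_FENCE, &prev);
        if (err)
                return err;

        /* ... submit the job; the mode cannot change while we hold the read lock ... */

        xe_hw_engine_group_put(group);
        return 0;
}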