Lines matching refs: group
(Identifier cross-reference over the panthor DRM GPU scheduler, panthor_sched.c. Each entry shows the source line number, the matching source line, the enclosing function, and a usage tag: member, argument, or local.)

105 	struct panthor_group *group;  member
675 #define group_queue_work(group, wname) \ argument
677 group_get(group); \
678 if (!queue_work((group)->ptdev->scheduler->wq, &(group)->wname ## _work)) \
679 group_put(group); \
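The group_queue_work() macro above (675-679) captures an idiom used throughout this listing: take a reference before queuing work, and drop it again when queue_work() returns false, because an already-pending work item will not run a second time to consume the extra reference. A minimal sketch of the idiom, with hypothetical obj/obj_get/obj_put names standing in for the group helpers:

    #include <linux/workqueue.h>

    struct obj {
            struct workqueue_struct *wq;
            struct work_struct work;
            /* refcount handled by obj_get()/obj_put(), sketched below */
    };

    static void obj_queue_work(struct obj *o)
    {
            obj_get(o);                        /* ref owned by the pending work */
            if (!queue_work(o->wq, &o->work))  /* false: already queued */
                    obj_put(o);                /* no new run will consume it */
    }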
739 struct panthor_group *group; member
795 panthor_queue_get_syncwait_obj(struct panthor_group *group, struct panthor_queue *queue) in panthor_queue_get_syncwait_obj() argument
797 struct panthor_device *ptdev = group->ptdev; in panthor_queue_get_syncwait_obj()
805 bo = panthor_vm_get_bo_for_va(group->vm, in panthor_queue_get_syncwait_obj()
827 static void group_free_queue(struct panthor_group *group, struct panthor_queue *queue) in group_free_queue() argument
851 struct panthor_group *group = container_of(work, in group_release_work() local
856 for (i = 0; i < group->queue_count; i++) in group_release_work()
857 group_free_queue(group, group->queues[i]); in group_release_work()
859 panthor_kernel_bo_destroy(group->suspend_buf); in group_release_work()
860 panthor_kernel_bo_destroy(group->protm_suspend_buf); in group_release_work()
861 panthor_kernel_bo_destroy(group->syncobjs); in group_release_work()
863 panthor_vm_put(group->vm); in group_release_work()
864 kfree(group); in group_release_work()
869 struct panthor_group *group = container_of(kref, in group_release() local
872 struct panthor_device *ptdev = group->ptdev; in group_release()
874 drm_WARN_ON(&ptdev->base, group->csg_id >= 0); in group_release()
875 drm_WARN_ON(&ptdev->base, !list_empty(&group->run_node)); in group_release()
876 drm_WARN_ON(&ptdev->base, !list_empty(&group->wait_node)); in group_release()
878 queue_work(panthor_cleanup_wq, &group->release_work); in group_release()
881 static void group_put(struct panthor_group *group) in group_put() argument
883 if (group) in group_put()
884 kref_put(&group->refcount, group_release); in group_put()
888 group_get(struct panthor_group *group) in group_get() argument
890 if (group) in group_get()
891 kref_get(&group->refcount); in group_get()
893 return group; in group_get()
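group_release() (869-878) never frees in place: the final kref_put() merely queues release_work on a dedicated cleanup workqueue, and group_release_work() (851-864) performs the actual teardown in process context, where destroying buffers and dropping the VM may sleep. A sketch of this two-stage release, assuming a module-level cleanup_wq:

    #include <linux/kref.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    static struct workqueue_struct *cleanup_wq;     /* assumed, module-level */

    struct obj {
            struct kref refcount;
            struct work_struct release_work;
    };

    static void obj_release_work(struct work_struct *work)
    {
            struct obj *o = container_of(work, struct obj, release_work);

            /* teardown that may sleep goes here */
            kfree(o);
    }

    static void obj_release(struct kref *kref)
    {
            struct obj *o = container_of(kref, struct obj, refcount);

            queue_work(cleanup_wq, &o->release_work);   /* defer the freeing */
    }

    static void obj_put(struct obj *o)
    {
            if (o)                  /* NULL-tolerant, like group_put() */
                    kref_put(&o->refcount, obj_release);
    }

    static struct obj *obj_get(struct obj *o)
    {
            if (o)
                    kref_get(&o->refcount);
            return o;               /* returns its argument, like group_get() */
    }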
904 group_bind_locked(struct panthor_group *group, u32 csg_id) in group_bind_locked() argument
906 struct panthor_device *ptdev = group->ptdev; in group_bind_locked()
912 if (drm_WARN_ON(&ptdev->base, group->csg_id != -1 || csg_id >= MAX_CSGS || in group_bind_locked()
913 ptdev->scheduler->csg_slots[csg_id].group)) in group_bind_locked()
916 ret = panthor_vm_active(group->vm); in group_bind_locked()
921 group_get(group); in group_bind_locked()
922 group->csg_id = csg_id; in group_bind_locked()
931 for (u32 i = 0; i < group->queue_count; i++) in group_bind_locked()
932 group->queues[i]->doorbell_id = csg_id + 1; in group_bind_locked()
934 csg_slot->group = group; in group_bind_locked()
946 group_unbind_locked(struct panthor_group *group) in group_unbind_locked() argument
948 struct panthor_device *ptdev = group->ptdev; in group_unbind_locked()
953 if (drm_WARN_ON(&ptdev->base, group->csg_id < 0 || group->csg_id >= MAX_CSGS)) in group_unbind_locked()
956 if (drm_WARN_ON(&ptdev->base, group->state == PANTHOR_CS_GROUP_ACTIVE)) in group_unbind_locked()
959 slot = &ptdev->scheduler->csg_slots[group->csg_id]; in group_unbind_locked()
960 panthor_vm_idle(group->vm); in group_unbind_locked()
961 group->csg_id = -1; in group_unbind_locked()
964 atomic_set(&group->tiler_oom, 0); in group_unbind_locked()
965 cancel_work(&group->tiler_oom_work); in group_unbind_locked()
967 for (u32 i = 0; i < group->queue_count; i++) in group_unbind_locked()
968 group->queues[i]->doorbell_id = -1; in group_unbind_locked()
970 slot->group = NULL; in group_unbind_locked()
972 group_put(group); in group_unbind_locked()
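group_bind_locked() and group_unbind_locked() keep a strict pairing: binding activates the group's VM and takes a reference that the CSG slot owns for as long as the group occupies it (916, 921, 934); unbinding idles the VM, clears the slot and drops that reference (960, 970, 972). A sketch of the invariant, with hypothetical slot and VM helpers:

    /* vm_activate()/vm_idle() stand in for panthor_vm_active()/_idle(). */
    static int slot_bind(struct slot *s, struct obj *o)
    {
            int ret = vm_activate(o->vm);

            if (ret)
                    return ret;
            s->owner = obj_get(o);  /* slot owns one ref while bound */
            return 0;
    }

    static void slot_unbind(struct slot *s)
    {
            struct obj *o = s->owner;

            vm_idle(o->vm);
            s->owner = NULL;
            obj_put(o);             /* release the slot's ref */
    }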
990 struct panthor_queue *queue = ptdev->scheduler->csg_slots[csg_id].group->queues[cs_id]; in cs_slot_prog_locked()
1035 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; in cs_slot_reset_locked() local
1036 struct panthor_queue *queue = group->queues[cs_id]; in cs_slot_reset_locked()
1047 if (!(group->blocked_queues & BIT(cs_id)) && !queue->timeout_suspended) { in cs_slot_reset_locked()
1088 struct panthor_group *group = ptdev->scheduler->csg_slots[csg_id].group; in cs_slot_sync_queue_state_locked() local
1089 struct panthor_queue *queue = group->queues[cs_id]; in cs_slot_sync_queue_state_locked()
1091 panthor_fw_get_cs_iface(group->ptdev, csg_id, cs_id); in cs_slot_sync_queue_state_locked()
1099 group->idle_queues |= BIT(cs_id); in cs_slot_sync_queue_state_locked()
1103 if (list_empty(&group->wait_node)) { in cs_slot_sync_queue_state_locked()
1104 list_move_tail(&group->wait_node, in cs_slot_sync_queue_state_locked()
1105 &group->ptdev->scheduler->groups.waiting); in cs_slot_sync_queue_state_locked()
1112 group->blocked_queues |= BIT(cs_id); in cs_slot_sync_queue_state_locked()
1140 struct panthor_group *group = csg_slot->group; in csg_slot_sync_queues_state_locked() local
1145 group->idle_queues = 0; in csg_slot_sync_queues_state_locked()
1146 group->blocked_queues = 0; in csg_slot_sync_queues_state_locked()
1148 for (i = 0; i < group->queue_count; i++) { in csg_slot_sync_queues_state_locked()
1149 if (group->queues[i]) in csg_slot_sync_queues_state_locked()
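Queue state is folded into two per-group bitmasks: idle_queues (1099) for queues with nothing left to run, blocked_queues (1112) for queues parked on a sync-object wait, both cleared before a full resync (1145-1146). A sketch of the bookkeeping; where the idle/blocked verdicts come from (firmware status registers in the driver) is abstracted into two flags:

    #include <linux/bits.h>

    struct obj {
            u32 idle_queues;        /* BIT(i): queue i has nothing to run */
            u32 blocked_queues;     /* BIT(i): queue i waits on a syncobj */
    };

    static void obj_sync_queue_state(struct obj *o, u32 cs_id,
                                     bool idle, bool blocked)
    {
            if (idle)
                    o->idle_queues |= BIT(cs_id);
            else if (blocked)
                    o->blocked_queues |= BIT(cs_id);
    }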
1159 struct panthor_group *group; in csg_slot_sync_state_locked() local
1166 group = csg_slot->group; in csg_slot_sync_state_locked()
1168 if (!group) in csg_slot_sync_state_locked()
1171 old_state = group->state; in csg_slot_sync_state_locked()
1218 for (i = 0; i < group->queue_count; i++) { in csg_slot_sync_state_locked()
1219 if (group->queues[i]) in csg_slot_sync_state_locked()
1224 group->state = new_state; in csg_slot_sync_state_locked()
1232 struct panthor_group *group; in csg_slot_prog_locked() local
1244 group = csg_slot->group; in csg_slot_prog_locked()
1245 if (!group || group->state == PANTHOR_CS_GROUP_ACTIVE) in csg_slot_prog_locked()
1248 csg_iface = panthor_fw_get_csg_iface(group->ptdev, csg_id); in csg_slot_prog_locked()
1250 for (i = 0; i < group->queue_count; i++) { in csg_slot_prog_locked()
1251 if (group->queues[i]) { in csg_slot_prog_locked()
1257 csg_iface->input->allow_compute = group->compute_core_mask; in csg_slot_prog_locked()
1258 csg_iface->input->allow_fragment = group->fragment_core_mask; in csg_slot_prog_locked()
1259 csg_iface->input->allow_other = group->tiler_core_mask; in csg_slot_prog_locked()
1260 csg_iface->input->endpoint_req = CSG_EP_REQ_COMPUTE(group->max_compute_cores) | in csg_slot_prog_locked()
1261 CSG_EP_REQ_FRAGMENT(group->max_fragment_cores) | in csg_slot_prog_locked()
1262 CSG_EP_REQ_TILER(group->max_tiler_cores) | in csg_slot_prog_locked()
1264 csg_iface->input->config = panthor_vm_as(group->vm); in csg_slot_prog_locked()
1266 if (group->suspend_buf) in csg_slot_prog_locked()
1267 csg_iface->input->suspend_buf = panthor_kernel_bo_gpuva(group->suspend_buf); in csg_slot_prog_locked()
1271 if (group->protm_suspend_buf) { in csg_slot_prog_locked()
1273 panthor_kernel_bo_gpuva(group->protm_suspend_buf); in csg_slot_prog_locked()
1289 struct panthor_group *group = csg_slot->group; in cs_slot_process_fatal_event_locked() local
1300 if (group) in cs_slot_process_fatal_event_locked()
1301 group->fatal_queues |= BIT(cs_id); in cs_slot_process_fatal_event_locked()
1331 struct panthor_group *group = csg_slot->group; in cs_slot_process_fault_event_locked() local
1332 struct panthor_queue *queue = group && cs_id < group->queue_count ? in cs_slot_process_fault_event_locked()
1333 group->queues[cs_id] : NULL; in cs_slot_process_fault_event_locked()
1373 static int group_process_tiler_oom(struct panthor_group *group, u32 cs_id) in group_process_tiler_oom() argument
1375 struct panthor_device *ptdev = group->ptdev; in group_process_tiler_oom()
1384 csg_id = group->csg_id; in group_process_tiler_oom()
1389 heaps = panthor_vm_get_heap_pool(group->vm, false); in group_process_tiler_oom()
1423 group->fatal_queues |= BIT(cs_id); in group_process_tiler_oom()
1429 csg_id = group->csg_id; in group_process_tiler_oom()
1461 struct panthor_group *group = in group_tiler_oom_work() local
1463 u32 tiler_oom = atomic_xchg(&group->tiler_oom, 0); in group_tiler_oom_work()
1468 group_process_tiler_oom(group, cs_id); in group_tiler_oom_work()
1472 group_put(group); in group_tiler_oom_work()
1481 struct panthor_group *group = csg_slot->group; in cs_slot_process_tiler_oom_event_locked() local
1485 if (drm_WARN_ON(&ptdev->base, !group)) in cs_slot_process_tiler_oom_event_locked()
1488 atomic_or(BIT(cs_id), &group->tiler_oom); in cs_slot_process_tiler_oom_event_locked()
1493 group_get(group); in cs_slot_process_tiler_oom_event_locked()
1494 if (!queue_work(sched->heap_alloc_wq, &group->tiler_oom_work)) in cs_slot_process_tiler_oom_event_locked()
1495 group_put(group); in cs_slot_process_tiler_oom_event_locked()
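Tiler out-of-memory handling is split across contexts: the IRQ-side handler records the queue in an atomic bitmask and kicks a worker with the usual ref dance (1488-1495, note the dedicated heap_alloc_wq), while group_tiler_oom_work() claims all pending bits at once with atomic_xchg() (1463) and grows the heap per flagged queue via group_process_tiler_oom() (1468). The drain loop below is a guess at the shape of the elided lines, with hypothetical obj naming:

    #include <linux/atomic.h>
    #include <linux/bitops.h>
    #include <linux/workqueue.h>

    struct obj {
            atomic_t tiler_oom;                      /* pending OOM queue mask */
            struct workqueue_struct *heap_alloc_wq;  /* on the scheduler in the driver */
            struct work_struct tiler_oom_work;
    };

    /* IRQ side: flag the queue, kick the worker, one ref per kick. */
    static void obj_flag_tiler_oom(struct obj *o, u32 cs_id)
    {
            atomic_or(BIT(cs_id), &o->tiler_oom);
            obj_get(o);
            if (!queue_work(o->heap_alloc_wq, &o->tiler_oom_work))
                    obj_put(o);
    }

    /* Worker side: take ownership of every pending bit in one shot. */
    static void obj_tiler_oom_work(struct work_struct *work)
    {
            struct obj *o = container_of(work, struct obj, tiler_oom_work);
            unsigned long pending = atomic_xchg(&o->tiler_oom, 0);
            u32 cs_id;

            for_each_set_bit(cs_id, &pending, 32)
                    obj_process_tiler_oom(o, cs_id);    /* hypothetical */

            obj_put(o);
    }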
1558 struct panthor_group *group = csg_slot->group; in csg_slot_sync_update_locked() local
1562 if (group) in csg_slot_sync_update_locked()
1563 group_queue_work(group, sync_upd); in csg_slot_sync_update_locked()
1573 struct panthor_group *group = csg_slot->group; in csg_slot_process_progress_timer_event_locked() local
1579 group = csg_slot->group; in csg_slot_process_progress_timer_event_locked()
1580 if (!drm_WARN_ON(&ptdev->base, !group)) in csg_slot_process_progress_timer_event_locked()
1581 group->timedout = true; in csg_slot_process_progress_timer_event_locked()
1834 group_is_idle(struct panthor_group *group) in group_is_idle() argument
1836 struct panthor_device *ptdev = group->ptdev; in group_is_idle()
1839 if (group->csg_id >= 0) in group_is_idle()
1840 return ptdev->scheduler->csg_slots[group->csg_id].idle; in group_is_idle()
1842 inactive_queues = group->idle_queues | group->blocked_queues; in group_is_idle()
1843 return hweight32(inactive_queues) == group->queue_count; in group_is_idle()
1847 group_can_run(struct panthor_group *group) in group_can_run() argument
1849 return group->state != PANTHOR_CS_GROUP_TERMINATED && in group_can_run()
1850 group->state != PANTHOR_CS_GROUP_UNKNOWN_STATE && in group_can_run()
1851 !group->destroyed && group->fatal_queues == 0 && in group_can_run()
1852 !group->timedout; in group_can_run()
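group_is_idle() (1834-1843) answers differently depending on residency: a group holding a CSG slot defers to the firmware-reported slot idle bit, while an off-slot group is idle exactly when every queue is either idle or blocked, which the hweight32() popcount expresses. group_can_run() then gates scheduling on the group being neither terminated, destroyed, faulted nor timed out. A worked example of the popcount test:

    #include <linux/bitops.h>
    #include <linux/bits.h>

    static bool example_is_idle(void)
    {
            /* 3 queues: queue 0 idle, queue 2 blocked, queue 1 still busy. */
            u32 inactive_queues = BIT(0) | BIT(2);

            return hweight32(inactive_queues) == 3;     /* 2 != 3: not idle */
    }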
1862 struct panthor_group *group, *tmp; in tick_ctx_pick_groups_from_list() local
1867 list_for_each_entry_safe(group, tmp, queue, run_node) { in tick_ctx_pick_groups_from_list()
1870 if (!group_can_run(group)) in tick_ctx_pick_groups_from_list()
1873 if (skip_idle_groups && group_is_idle(group)) in tick_ctx_pick_groups_from_list()
1877 if (ctx->vms[i] == group->vm) in tick_ctx_pick_groups_from_list()
1885 group_get(group); in tick_ctx_pick_groups_from_list()
1887 list_move_tail(&group->run_node, &ctx->groups[group->priority]); in tick_ctx_pick_groups_from_list()
1889 if (group_is_idle(group)) in tick_ctx_pick_groups_from_list()
1893 ctx->vms[ctx->as_count++] = group->vm; in tick_ctx_pick_groups_from_list()
1895 if (ctx->min_priority > group->priority) in tick_ctx_pick_groups_from_list()
1896 ctx->min_priority = group->priority; in tick_ctx_pick_groups_from_list()
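tick_ctx_pick_groups_from_list() (1862-1896) moves candidates onto the tick context's per-priority lists while still walking the source list, hence the list_for_each_entry_safe() iterator; it also deduplicates VMs into ctx->vms[] (1877, 1893) because every resident group needs a GPU address-space slot. A sketch of the move-while-iterating part:

    #include <linux/list.h>

    struct obj {
            struct list_head node;
    };

    static void pick_all(struct list_head *src, struct list_head *dst)
    {
            struct obj *o, *tmp;

            /* _safe variant: o is unlinked from src mid-walk */
            list_for_each_entry_safe(o, tmp, src, node) {
                    if (!obj_can_run(o))    /* hypothetical predicate */
                            continue;
                    list_move_tail(&o->node, dst);
            }
    }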
1906 struct panthor_group *group, in tick_ctx_insert_old_group() argument
1909 struct panthor_csg_slot *csg_slot = &sched->csg_slots[group->csg_id]; in tick_ctx_insert_old_group()
1913 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); in tick_ctx_insert_old_group()
1925 &ctx->old_groups[csg_slot->group->priority], in tick_ctx_insert_old_group()
1930 list_add_tail(&csg_slot->group->run_node, &other_group->run_node); in tick_ctx_insert_old_group()
1935 list_add_tail(&group->run_node, &ctx->old_groups[group->priority]); in tick_ctx_insert_old_group()
1959 struct panthor_group *group = csg_slot->group; in tick_ctx_init() local
1962 if (!group) in tick_ctx_init()
1966 group_get(group); in tick_ctx_init()
1971 if (panthor_vm_has_unhandled_faults(group->vm)) { in tick_ctx_init()
1975 if (!group->fatal_queues) in tick_ctx_init()
1976 group->fatal_queues |= GENMASK(group->queue_count - 1, 0); in tick_ctx_init()
1979 tick_ctx_insert_old_group(sched, ctx, group, full_tick); in tick_ctx_init()
1995 group_term_post_processing(struct panthor_group *group) in group_term_post_processing() argument
2002 if (drm_WARN_ON(&group->ptdev->base, group_can_run(group))) in group_term_post_processing()
2006 for (i = 0; i < group->queue_count; i++) { in group_term_post_processing()
2007 struct panthor_queue *queue = group->queues[i]; in group_term_post_processing()
2011 if (group->fatal_queues & BIT(i)) in group_term_post_processing()
2013 else if (group->timedout) in group_term_post_processing()
2030 syncobj = group->syncobjs->kmap + (i * sizeof(*syncobj)); in group_term_post_processing()
2033 sched_queue_work(group->ptdev->scheduler, sync_upd); in group_term_post_processing()
2045 struct panthor_group *group = in group_term_work() local
2048 group_term_post_processing(group); in group_term_work()
2049 group_put(group); in group_term_work()
2057 struct panthor_group *group, *tmp; in tick_ctx_cleanup() local
2061 list_for_each_entry_safe(group, tmp, &ctx->old_groups[i], run_node) { in tick_ctx_cleanup()
2066 group_can_run(group)); in tick_ctx_cleanup()
2068 if (!group_can_run(group)) { in tick_ctx_cleanup()
2069 list_del_init(&group->run_node); in tick_ctx_cleanup()
2070 list_del_init(&group->wait_node); in tick_ctx_cleanup()
2071 group_queue_work(group, term); in tick_ctx_cleanup()
2072 } else if (group->csg_id >= 0) { in tick_ctx_cleanup()
2073 list_del_init(&group->run_node); in tick_ctx_cleanup()
2075 list_move(&group->run_node, in tick_ctx_cleanup()
2076 group_is_idle(group) ? in tick_ctx_cleanup()
2077 &sched->groups.idle[group->priority] : in tick_ctx_cleanup()
2078 &sched->groups.runnable[group->priority]); in tick_ctx_cleanup()
2080 group_put(group); in tick_ctx_cleanup()
2091 list_for_each_entry_safe(group, tmp, &ctx->groups[i], run_node) { in tick_ctx_cleanup()
2092 if (group->csg_id >= 0) { in tick_ctx_cleanup()
2093 list_del_init(&group->run_node); in tick_ctx_cleanup()
2095 list_move(&group->run_node, in tick_ctx_cleanup()
2096 group_is_idle(group) ? in tick_ctx_cleanup()
2097 &sched->groups.idle[group->priority] : in tick_ctx_cleanup()
2098 &sched->groups.runnable[group->priority]); in tick_ctx_cleanup()
2100 group_put(group); in tick_ctx_cleanup()
2108 struct panthor_group *group, *tmp; in tick_ctx_apply() local
2120 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { in tick_ctx_apply()
2121 bool term = !group_can_run(group); in tick_ctx_apply()
2122 int csg_id = group->csg_id; in tick_ctx_apply()
2134 list_for_each_entry(group, &ctx->groups[prio], run_node) { in tick_ctx_apply()
2136 int csg_id = group->csg_id; in tick_ctx_apply()
2169 list_for_each_entry(group, &ctx->old_groups[prio], run_node) { in tick_ctx_apply()
2174 if (group->csg_id >= 0) in tick_ctx_apply()
2175 sched_process_csg_irq_locked(ptdev, group->csg_id); in tick_ctx_apply()
2177 group_unbind_locked(group); in tick_ctx_apply()
2182 if (!sched->csg_slots[i].group) in tick_ctx_apply()
2191 list_for_each_entry(group, &ctx->groups[prio], run_node) { in tick_ctx_apply()
2192 int csg_id = group->csg_id; in tick_ctx_apply()
2206 group_bind_locked(group, csg_id); in tick_ctx_apply()
2209 group->state == PANTHOR_CS_GROUP_SUSPENDED ? in tick_ctx_apply()
2227 list_for_each_entry_safe(group, tmp, &ctx->groups[prio], run_node) { in tick_ctx_apply()
2228 list_del_init(&group->run_node); in tick_ctx_apply()
2235 if (group->destroyed) in tick_ctx_apply()
2237 group_put(group); in tick_ctx_apply()
2245 list_for_each_entry_safe(group, tmp, &ctx->old_groups[prio], run_node) { in tick_ctx_apply()
2246 if (!group_can_run(group)) in tick_ctx_apply()
2249 if (group_is_idle(group)) in tick_ctx_apply()
2250 list_move_tail(&group->run_node, &sched->groups.idle[prio]); in tick_ctx_apply()
2252 list_move_tail(&group->run_node, &sched->groups.runnable[prio]); in tick_ctx_apply()
2253 group_put(group); in tick_ctx_apply()
2400 static int panthor_queue_eval_syncwait(struct panthor_group *group, u8 queue_idx) in panthor_queue_eval_syncwait() argument
2402 struct panthor_queue *queue = group->queues[queue_idx]; in panthor_queue_eval_syncwait()
2410 syncobj = panthor_queue_get_syncwait_obj(group, queue); in panthor_queue_eval_syncwait()
2434 struct panthor_group *group, *tmp; in sync_upd_work() local
2438 list_for_each_entry_safe(group, tmp, &sched->groups.waiting, wait_node) { in sync_upd_work()
2439 u32 tested_queues = group->blocked_queues; in sync_upd_work()
2446 ret = panthor_queue_eval_syncwait(group, cs_id); in sync_upd_work()
2447 drm_WARN_ON(&group->ptdev->base, ret < 0); in sync_upd_work()
2455 group->blocked_queues &= ~unblocked_queues; in sync_upd_work()
2457 if (group->csg_id < 0) { in sync_upd_work()
2458 list_move(&group->run_node, in sync_upd_work()
2459 &sched->groups.runnable[group->priority]); in sync_upd_work()
2460 if (group->priority == PANTHOR_CSG_PRIORITY_RT) in sync_upd_work()
2465 if (!group->blocked_queues) in sync_upd_work()
2466 list_del_init(&group->wait_node); in sync_upd_work()
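sync_upd_work() (2434-2466) walks the waiting list and re-evaluates each blocked queue's wait condition: satisfied waits are cleared from blocked_queues (2455), an off-slot group that regained runnable work moves back to its priority's runnable list (2457-2459), and a group with nothing left blocked leaves the wait list (2465-2466). A condensed sketch of the per-group pass; the positive-return convention of the evaluation helper is an assumption, and the obj fields match the earlier sketches:

    static void obj_sync_update(struct obj *o)
    {
            u32 tested = o->blocked_queues, unblocked = 0;

            while (tested) {
                    u32 cs_id = ffs(tested) - 1;            /* lowest set bit */

                    if (obj_eval_syncwait(o, cs_id) > 0)    /* hypothetical: >0 = signaled */
                            unblocked |= BIT(cs_id);
                    tested &= ~BIT(cs_id);
            }

            o->blocked_queues &= ~unblocked;
            if (!o->blocked_queues)
                    list_del_init(&o->wait_node);           /* done waiting */
    }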
2474 static void group_schedule_locked(struct panthor_group *group, u32 queue_mask) in group_schedule_locked() argument
2476 struct panthor_device *ptdev = group->ptdev; in group_schedule_locked()
2478 struct list_head *queue = &sched->groups.runnable[group->priority]; in group_schedule_locked()
2483 if (!group_can_run(group)) in group_schedule_locked()
2487 if ((queue_mask & group->blocked_queues) == queue_mask) in group_schedule_locked()
2490 was_idle = group_is_idle(group); in group_schedule_locked()
2491 group->idle_queues &= ~queue_mask; in group_schedule_locked()
2497 if (was_idle && !group_is_idle(group)) in group_schedule_locked()
2498 list_move_tail(&group->run_node, queue); in group_schedule_locked()
2501 if (group->priority == PANTHOR_CSG_PRIORITY_RT) { in group_schedule_locked()
2552 static void panthor_group_stop(struct panthor_group *group) in panthor_group_stop() argument
2554 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_stop()
2558 for (u32 i = 0; i < group->queue_count; i++) in panthor_group_stop()
2559 queue_stop(group->queues[i], NULL); in panthor_group_stop()
2561 group_get(group); in panthor_group_stop()
2562 list_move_tail(&group->run_node, &sched->reset.stopped_groups); in panthor_group_stop()
2565 static void panthor_group_start(struct panthor_group *group) in panthor_group_start() argument
2567 struct panthor_scheduler *sched = group->ptdev->scheduler; in panthor_group_start()
2569 lockdep_assert_held(&group->ptdev->scheduler->reset.lock); in panthor_group_start()
2571 for (u32 i = 0; i < group->queue_count; i++) in panthor_group_start()
2572 queue_start(group->queues[i]); in panthor_group_start()
2574 if (group_can_run(group)) { in panthor_group_start()
2575 list_move_tail(&group->run_node, in panthor_group_start()
2576 group_is_idle(group) ? in panthor_group_start()
2577 &sched->groups.idle[group->priority] : in panthor_group_start()
2578 &sched->groups.runnable[group->priority]); in panthor_group_start()
2580 list_del_init(&group->run_node); in panthor_group_start()
2581 list_del_init(&group->wait_node); in panthor_group_start()
2582 group_queue_work(group, term); in panthor_group_start()
2585 group_put(group); in panthor_group_start()
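Around a GPU reset every group gets parked: panthor_group_stop() stops the queues, takes a reference and moves the group onto reset.stopped_groups (2558-2562); panthor_group_start() restarts the queues and either reinserts the group into the idle/runnable lists or, when it can no longer run, schedules termination, then drops the parking reference (2571-2585). A sketch of the park/unpark pairing with hypothetical names:

    /* Park: one extra ref keeps the object alive across the reset. */
    static void obj_park(struct obj *o, struct list_head *stopped)
    {
            obj_get(o);
            list_move_tail(&o->node, stopped);
    }

    /* Unpark: requeue or retire, then drop the parking ref. */
    static void obj_unpark(struct obj *o, struct list_head *runnable)
    {
            if (obj_can_run(o))
                    list_move_tail(&o->node, runnable);
            else
                    list_del_init(&o->node);    /* retirement handled elsewhere */
            obj_put(o);
    }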
2615 struct panthor_group *group; in panthor_sched_suspend() local
2624 if (csg_slot->group) { in panthor_sched_suspend()
2626 group_can_run(csg_slot->group) ? in panthor_sched_suspend()
2649 csg_slot->group->timedout = true; in panthor_sched_suspend()
2668 if (csg_slot->group->state != PANTHOR_CS_GROUP_TERMINATED) in panthor_sched_suspend()
2669 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; in panthor_sched_suspend()
2689 csg_slot->group->state = PANTHOR_CS_GROUP_TERMINATED; in panthor_sched_suspend()
2700 group = csg_slot->group; in panthor_sched_suspend()
2701 if (!group) in panthor_sched_suspend()
2704 group_get(group); in panthor_sched_suspend()
2706 if (group->csg_id >= 0) in panthor_sched_suspend()
2707 sched_process_csg_irq_locked(ptdev, group->csg_id); in panthor_sched_suspend()
2709 group_unbind_locked(group); in panthor_sched_suspend()
2711 drm_WARN_ON(&group->ptdev->base, !list_empty(&group->run_node)); in panthor_sched_suspend()
2713 if (group_can_run(group)) { in panthor_sched_suspend()
2714 list_add(&group->run_node, in panthor_sched_suspend()
2715 &sched->groups.idle[group->priority]); in panthor_sched_suspend()
2720 list_del_init(&group->wait_node); in panthor_sched_suspend()
2721 group_queue_work(group, term); in panthor_sched_suspend()
2723 group_put(group); in panthor_sched_suspend()
2731 struct panthor_group *group, *group_tmp; in panthor_sched_pre_reset() local
2751 list_for_each_entry_safe(group, group_tmp, &sched->groups.runnable[i], run_node) in panthor_sched_pre_reset()
2752 panthor_group_stop(group); in panthor_sched_pre_reset()
2756 list_for_each_entry_safe(group, group_tmp, &sched->groups.idle[i], run_node) in panthor_sched_pre_reset()
2757 panthor_group_stop(group); in panthor_sched_pre_reset()
2766 struct panthor_group *group, *group_tmp; in panthor_sched_post_reset() local
2770 list_for_each_entry_safe(group, group_tmp, &sched->reset.stopped_groups, run_node) { in panthor_sched_post_reset()
2775 group->state = PANTHOR_CS_GROUP_TERMINATED; in panthor_sched_post_reset()
2777 panthor_group_start(group); in panthor_sched_post_reset()
2795 struct panthor_group *group = in group_sync_upd_work() local
2803 for (queue_idx = 0; queue_idx < group->queue_count; queue_idx++) { in group_sync_upd_work()
2804 struct panthor_queue *queue = group->queues[queue_idx]; in group_sync_upd_work()
2810 syncobj = group->syncobjs->kmap + (queue_idx * sizeof(*syncobj)); in group_sync_upd_work()
2829 group_put(group); in group_sync_upd_work()
2836 struct panthor_group *group = job->group; in queue_run_job() local
2837 struct panthor_queue *queue = group->queues[job->queue_idx]; in queue_run_job()
2838 struct panthor_device *ptdev = group->ptdev; in queue_run_job()
2845 u64 sync_addr = panthor_kernel_bo_gpuva(group->syncobjs) + in queue_run_job()
2906 if (!group_can_run(group)) { in queue_run_job()
2936 if (group->csg_id < 0) { in queue_run_job()
2942 if (!(group->blocked_queues & BIT(job->queue_idx)) && in queue_run_job()
2948 group_schedule_locked(group, BIT(job->queue_idx)); in queue_run_job()
2952 !(group->blocked_queues & BIT(job->queue_idx))) { in queue_run_job()
2977 struct panthor_group *group = job->group; in queue_timedout_job() local
2978 struct panthor_device *ptdev = group->ptdev; in queue_timedout_job()
2980 struct panthor_queue *queue = group->queues[job->queue_idx]; in queue_timedout_job()
2989 group->timedout = true; in queue_timedout_job()
2990 if (group->csg_id >= 0) { in queue_timedout_job()
2996 list_del_init(&group->run_node); in queue_timedout_job()
2997 list_del_init(&group->wait_node); in queue_timedout_job()
2999 group_queue_work(group, term); in queue_timedout_job()
3021 group_create_queue(struct panthor_group *group, in group_create_queue() argument
3048 queue->ringbuf = panthor_kernel_bo_create(group->ptdev, group->vm, in group_create_queue()
3063 queue->iface.mem = panthor_fw_alloc_queue_iface_mem(group->ptdev, in group_create_queue()
3074 group->ptdev->scheduler->wq, 1, in group_create_queue()
3077 group->ptdev->reset.wq, in group_create_queue()
3078 NULL, "panthor-queue", group->ptdev->base.dev); in group_create_queue()
3088 group_free_queue(group, queue); in group_create_queue()
3102 struct panthor_group *group = NULL; in panthor_group_create() local
3122 group = kzalloc(sizeof(*group), GFP_KERNEL); in panthor_group_create()
3123 if (!group) in panthor_group_create()
3126 spin_lock_init(&group->fatal_lock); in panthor_group_create()
3127 kref_init(&group->refcount); in panthor_group_create()
3128 group->state = PANTHOR_CS_GROUP_CREATED; in panthor_group_create()
3129 group->csg_id = -1; in panthor_group_create()
3131 group->ptdev = ptdev; in panthor_group_create()
3132 group->max_compute_cores = group_args->max_compute_cores; in panthor_group_create()
3133 group->compute_core_mask = group_args->compute_core_mask; in panthor_group_create()
3134 group->max_fragment_cores = group_args->max_fragment_cores; in panthor_group_create()
3135 group->fragment_core_mask = group_args->fragment_core_mask; in panthor_group_create()
3136 group->max_tiler_cores = group_args->max_tiler_cores; in panthor_group_create()
3137 group->tiler_core_mask = group_args->tiler_core_mask; in panthor_group_create()
3138 group->priority = group_args->priority; in panthor_group_create()
3140 INIT_LIST_HEAD(&group->wait_node); in panthor_group_create()
3141 INIT_LIST_HEAD(&group->run_node); in panthor_group_create()
3142 INIT_WORK(&group->term_work, group_term_work); in panthor_group_create()
3143 INIT_WORK(&group->sync_upd_work, group_sync_upd_work); in panthor_group_create()
3144 INIT_WORK(&group->tiler_oom_work, group_tiler_oom_work); in panthor_group_create()
3145 INIT_WORK(&group->release_work, group_release_work); in panthor_group_create()
3147 group->vm = panthor_vm_pool_get_vm(pfile->vms, group_args->vm_id); in panthor_group_create()
3148 if (!group->vm) { in panthor_group_create()
3154 group->suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); in panthor_group_create()
3155 if (IS_ERR(group->suspend_buf)) { in panthor_group_create()
3156 ret = PTR_ERR(group->suspend_buf); in panthor_group_create()
3157 group->suspend_buf = NULL; in panthor_group_create()
3162 group->protm_suspend_buf = panthor_fw_alloc_suspend_buf_mem(ptdev, suspend_size); in panthor_group_create()
3163 if (IS_ERR(group->protm_suspend_buf)) { in panthor_group_create()
3164 ret = PTR_ERR(group->protm_suspend_buf); in panthor_group_create()
3165 group->protm_suspend_buf = NULL; in panthor_group_create()
3169 group->syncobjs = panthor_kernel_bo_create(ptdev, group->vm, in panthor_group_create()
3176 if (IS_ERR(group->syncobjs)) { in panthor_group_create()
3177 ret = PTR_ERR(group->syncobjs); in panthor_group_create()
3181 ret = panthor_kernel_bo_vmap(group->syncobjs); in panthor_group_create()
3185 memset(group->syncobjs->kmap, 0, in panthor_group_create()
3189 group->queues[i] = group_create_queue(group, &queue_args[i]); in panthor_group_create()
3190 if (IS_ERR(group->queues[i])) { in panthor_group_create()
3191 ret = PTR_ERR(group->queues[i]); in panthor_group_create()
3192 group->queues[i] = NULL; in panthor_group_create()
3196 group->queue_count++; in panthor_group_create()
3199 group->idle_queues = GENMASK(group->queue_count - 1, 0); in panthor_group_create()
3201 ret = xa_alloc(&gpool->xa, &gid, group, XA_LIMIT(1, MAX_GROUPS_PER_POOL), GFP_KERNEL); in panthor_group_create()
3207 panthor_group_stop(group); in panthor_group_create()
3210 list_add_tail(&group->run_node, in panthor_group_create()
3211 &sched->groups.idle[group->priority]); in panthor_group_create()
3219 group_put(group); in panthor_group_create()
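panthor_group_create() (3102-3219) leans on the release worker for error unwinding: each failed sub-allocation records the error, NULLs the half-initialized pointer (3156-3157, 3164-3165) and funnels into a single group_put(), after which group_release_work() frees only what actually got allocated. A sketch of that single-exit idiom:

    #include <linux/err.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    static struct obj *obj_create(void)
    {
            struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);
            int ret;

            if (!o)
                    return ERR_PTR(-ENOMEM);

            kref_init(&o->refcount);

            o->buf = obj_alloc_buf();       /* hypothetical sub-allocation */
            if (IS_ERR(o->buf)) {
                    ret = PTR_ERR(o->buf);
                    o->buf = NULL;          /* release worker skips NULLs */
                    goto err_put;
            }
            return o;

    err_put:
            obj_put(o);     /* one put tears down whatever exists */
            return ERR_PTR(ret);
    }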
3228 struct panthor_group *group; in panthor_group_destroy() local
3230 group = xa_erase(&gpool->xa, group_handle); in panthor_group_destroy()
3231 if (!group) in panthor_group_destroy()
3234 for (u32 i = 0; i < group->queue_count; i++) { in panthor_group_destroy()
3235 if (group->queues[i]) in panthor_group_destroy()
3236 drm_sched_entity_destroy(&group->queues[i]->entity); in panthor_group_destroy()
3241 group->destroyed = true; in panthor_group_destroy()
3242 if (group->csg_id >= 0) { in panthor_group_destroy()
3248 list_del_init(&group->run_node); in panthor_group_destroy()
3249 list_del_init(&group->wait_node); in panthor_group_destroy()
3250 group_queue_work(group, term); in panthor_group_destroy()
3255 group_put(group); in panthor_group_destroy()
3262 struct panthor_group *group; in group_from_handle() local
3265 group = group_get(xa_load(&pool->xa, group_handle)); in group_from_handle()
3268 return group; in group_from_handle()
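group_from_handle() (3262-3268) pairs xa_load() with an immediate group_get() so that a concurrent xa_erase() in panthor_group_destroy() (3230) cannot free the group between lookup and use; group_get() returning its (possibly NULL) argument lets the miss case fall through naturally. A sketch, assuming the xarray's internal lock serializes lookup against removal:

    #include <linux/xarray.h>

    static struct obj *pool_lookup(struct xarray *xa, u32 handle)
    {
            struct obj *o;

            xa_lock(xa);
            o = obj_get(xa_load(xa, handle));   /* NULL-safe get */
            xa_unlock(xa);

            return o;   /* NULL on miss, referenced object on hit */
    }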
3277 struct panthor_group *group; in panthor_group_get_state() local
3282 group = group_from_handle(gpool, get_state->group_handle); in panthor_group_get_state()
3283 if (!group) in panthor_group_get_state()
3289 if (group->timedout) in panthor_group_get_state()
3291 if (group->fatal_queues) { in panthor_group_get_state()
3293 get_state->fatal_queues = group->fatal_queues; in panthor_group_get_state()
3297 group_put(group); in panthor_group_get_state()
3317 struct panthor_group *group; in panthor_group_pool_destroy() local
3323 xa_for_each(&gpool->xa, i, group) in panthor_group_pool_destroy()
3335 drm_WARN_ON(&job->group->ptdev->base, !list_empty(&job->node)); in job_release()
3345 group_put(job->group); in job_release()
3373 return job->group->vm; in panthor_job_vm()
3413 job->group = group_from_handle(gpool, group_handle); in panthor_job_create()
3414 if (!job->group) { in panthor_job_create()
3419 if (!group_can_run(job->group)) { in panthor_job_create()
3424 if (job->queue_idx >= job->group->queue_count || in panthor_job_create()
3425 !job->group->queues[job->queue_idx]) { in panthor_job_create()
3442 &job->group->queues[job->queue_idx]->entity, in panthor_job_create()
3443 1, job->group); in panthor_job_create()
3458 panthor_vm_update_resvs(job->group->vm, exec, &sched_job->s_fence->finished, in panthor_job_update_resvs()