Lines matching refs: cs (habanalabs driver, command-submission path)

250 void cs_get(struct hl_cs *cs)  in cs_get()  argument
252 kref_get(&cs->refcount); in cs_get()
255 static int cs_get_unless_zero(struct hl_cs *cs) in cs_get_unless_zero() argument
257 return kref_get_unless_zero(&cs->refcount); in cs_get_unless_zero()
260 static void cs_put(struct hl_cs *cs) in cs_put() argument
262 kref_put(&cs->refcount, cs_do_release); in cs_put()
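
Taken together, the three matches above give the complete reference-counting helpers for a command submission (CS). A reconstruction from the fragments, with cs_do_release() as the kref release callback (its body appears further down in this listing):

void cs_get(struct hl_cs *cs)
{
        kref_get(&cs->refcount);
}

static int cs_get_unless_zero(struct hl_cs *cs)
{
        /* Returns 0 if the CS is already on its way to release */
        return kref_get_unless_zero(&cs->refcount);
}

static void cs_put(struct hl_cs *cs)
{
        kref_put(&cs->refcount, cs_do_release);
}
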
277 bool cs_needs_completion(struct hl_cs *cs) in cs_needs_completion() argument
282 if (cs->staged_cs && !cs->staged_last) in cs_needs_completion()
288 bool cs_needs_timeout(struct hl_cs *cs) in cs_needs_timeout() argument
293 if (cs->staged_cs && !cs->staged_first) in cs_needs_timeout()
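
These two predicates encode the staged-submission rules: only the last CS of a staged set gets a completion, and only the first one is armed with a timeout. A reconstruction from the visible branches (the trailing `return true` in each is assumed):

bool cs_needs_completion(struct hl_cs *cs)
{
        /* In a staged set, only the last CS signals completion; any
         * non-staged CS always gets a completion.
         */
        if (cs->staged_cs && !cs->staged_last)
                return false;

        return true;
}

bool cs_needs_timeout(struct hl_cs *cs)
{
        /* Only the first CS of a staged set (or a non-staged CS) is
         * tracked by the timeout-detection timer.
         */
        if (cs->staged_cs && !cs->staged_first)
                return false;

        return true;
}
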
322 parser.ctx_id = job->cs->ctx->asid; in cs_parser()
323 parser.cs_sequence = job->cs->sequence; in cs_parser()
334 parser.completion = cs_needs_completion(job->cs); in cs_parser()
363 struct hl_cs *cs = job->cs; in hl_complete_job() local
393 spin_lock(&cs->job_lock); in hl_complete_job()
395 spin_unlock(&cs->job_lock); in hl_complete_job()
411 if (cs_needs_completion(cs) && in hl_complete_job()
418 cs->completion_timestamp = job->timestamp; in hl_complete_job()
420 cs_put(cs); in hl_complete_job()
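
hl_complete_job() detaches a finished job from its CS and drops the per-job reference. A hedged sketch of the core from the fragments; the list_del() under job_lock and the job_took_cs_reference() predicate (standing in for the real queue-type test) are assumptions consistent with the surrounding lines:

        struct hl_cs *cs = job->cs;

        spin_lock(&cs->job_lock);
        list_del(&job->cs_node);                /* assumed: unlink the job */
        spin_unlock(&cs->job_lock);

        /* Only jobs that took a CS reference at submission time (see
         * cs_ioctl_default below) drop one here; the last such job also
         * provides the completion timestamp.
         */
        if (cs_needs_completion(cs) &&
            job_took_cs_reference(job)) {       /* hypothetical predicate */
                cs->completion_timestamp = job->timestamp;
                cs_put(cs);
        }
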
438 struct hl_cs *cs; in hl_staged_cs_find_first() local
440 list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node) in hl_staged_cs_find_first()
441 if (cs->staged_cs && cs->staged_first && in hl_staged_cs_find_first()
442 cs->sequence == cs_seq) in hl_staged_cs_find_first()
443 return cs; in hl_staged_cs_find_first()
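
The lookup for the anchor of a staged set is visible almost in full; only the fallthrough return is assumed:

struct hl_cs *hl_staged_cs_find_first(struct hl_device *hdev, u64 cs_seq)
{
        struct hl_cs *cs;

        /* Walk the mirror list newest-to-oldest for the first CS of the
         * staged set identified by cs_seq.
         */
        list_for_each_entry_reverse(cs, &hdev->cs_mirror_list, mirror_node)
                if (cs->staged_cs && cs->staged_first &&
                                cs->sequence == cs_seq)
                        return cs;

        return NULL;    /* assumed: no such set in flight */
}
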
455 bool is_staged_cs_last_exists(struct hl_device *hdev, struct hl_cs *cs) in is_staged_cs_last_exists() argument
459 last_entry = list_last_entry(&cs->staged_cs_node, struct hl_cs, in is_staged_cs_last_exists()
478 static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_get() argument
485 if (!cs->staged_last) in staged_cs_get()
486 cs_get(cs); in staged_cs_get()
497 static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs) in staged_cs_put() argument
502 if (!cs_needs_completion(cs)) in staged_cs_put()
503 cs_put(cs); in staged_cs_put()
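
The staged get/put pair keeps every non-final CS of a set alive until the whole set retires; reconstructed from the fragments, with interpretive comments:

static void staged_cs_get(struct hl_device *hdev, struct hl_cs *cs)
{
        /* Only the last CS of the set gets a completion, so take an
         * extra reference on every other member to keep it alive.
         */
        if (!cs->staged_last)
                cs_get(cs);
}

static void staged_cs_put(struct hl_device *hdev, struct hl_cs *cs)
{
        /* Drop the extra reference for members that will never see a
         * completion of their own.
         */
        if (!cs_needs_completion(cs))
                cs_put(cs);
}
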
506 static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs) in cs_handle_tdr() argument
510 if (!cs_needs_timeout(cs)) in cs_handle_tdr()
523 if (cs->staged_cs && cs->staged_last) { in cs_handle_tdr()
524 first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence); in cs_handle_tdr()
526 cs = first_cs; in cs_handle_tdr()
534 if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT) in cs_handle_tdr()
537 if (cs->tdr_active) in cs_handle_tdr()
538 cancel_delayed_work_sync(&cs->work_tdr); in cs_handle_tdr()
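
cs_handle_tdr() retires the timeout-detection timer when a CS completes. A partial sketch from the fragments; the NULL check on first_cs and the elided re-arming of the timer on the next pending CS are assumptions:

static void cs_handle_tdr(struct hl_device *hdev, struct hl_cs *cs)
{
        struct hl_cs *first_cs;

        if (!cs_needs_timeout(cs))
                return;

        /* In a staged set the timer lives on the first CS, so when the
         * last CS retires, redirect the handling to that anchor.
         */
        if (cs->staged_cs && cs->staged_last) {
                first_cs = hl_staged_cs_find_first(hdev, cs->staged_sequence);
                if (first_cs)
                        cs = first_cs;
        }

        /* Nothing to cancel if the CS already timed out or timeouts are
         * disabled altogether.
         */
        if (cs->timedout || hdev->timeout_jiffies == MAX_SCHEDULE_TIMEOUT)
                return;

        if (cs->tdr_active)
                cancel_delayed_work_sync(&cs->work_tdr);

        /* ... arm the TDR on the next CS in the mirror list (elided) ... */
}
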
606 static void complete_multi_cs(struct hl_device *hdev, struct hl_cs *cs) in complete_multi_cs() argument
608 struct hl_fence *fence = cs->fence; in complete_multi_cs()
612 if (cs->staged_cs && !cs->staged_first) in complete_multi_cs()
656 struct hl_cs *cs, in cs_release_sob_reset_handler() argument
663 if (!hl_cs_cmpl->hw_sob || !cs->submitted) in cs_release_sob_reset_handler()
696 struct hl_cs *cs = container_of(ref, struct hl_cs, refcount); in cs_do_release() local
697 struct hl_device *hdev = cs->ctx->hdev; in cs_do_release()
700 container_of(cs->fence, struct hl_cs_compl, base_fence); in cs_do_release()
702 cs->completed = true; in cs_do_release()
712 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_do_release()
715 if (!cs->submitted) { in cs_do_release()
721 if (cs->type == CS_TYPE_WAIT || in cs_do_release()
722 cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_do_release()
723 hl_fence_put(cs->signal_fence); in cs_do_release()
729 hl_hw_queue_update_ci(cs); in cs_do_release()
733 list_del_init(&cs->mirror_node); in cs_do_release()
736 cs_handle_tdr(hdev, cs); in cs_do_release()
738 if (cs->staged_cs) { in cs_do_release()
742 if (cs->staged_last) { in cs_do_release()
746 &cs->staged_cs_node, staged_cs_node) in cs_do_release()
754 if (cs->submitted) { in cs_do_release()
756 list_del(&cs->staged_cs_node); in cs_do_release()
768 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) && cs->encaps_signals) in cs_do_release()
769 kref_put(&cs->encaps_sig_hdl->refcount, hl_encaps_release_handle_and_put_ctx); in cs_do_release()
775 hl_debugfs_remove_cs(cs); in cs_do_release()
777 hdev->shadow_cs_queue[cs->sequence & (hdev->asic_prop.max_pending_cs - 1)] = NULL; in cs_do_release()
783 if (cs->timedout) in cs_do_release()
784 cs->fence->error = -ETIMEDOUT; in cs_do_release()
785 else if (cs->aborted) in cs_do_release()
786 cs->fence->error = -EIO; in cs_do_release()
787 else if (!cs->submitted) in cs_do_release()
788 cs->fence->error = -EBUSY; in cs_do_release()
790 if (unlikely(cs->skip_reset_on_timeout)) { in cs_do_release()
793 cs->sequence, in cs_do_release()
794 div_u64(jiffies - cs->submission_time_jiffies, HZ)); in cs_do_release()
797 if (cs->timestamp) { in cs_do_release()
798 cs->fence->timestamp = cs->completion_timestamp; in cs_do_release()
799 hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence, in cs_do_release()
800 cs->fence->timestamp, cs->fence->error); in cs_do_release()
803 hl_ctx_put(cs->ctx); in cs_do_release()
805 complete_all(&cs->fence->completion); in cs_do_release()
806 complete_multi_cs(hdev, cs); in cs_do_release()
808 cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl); in cs_do_release()
810 hl_fence_put(cs->fence); in cs_do_release()
812 kfree(cs->jobs_in_queue_cnt); in cs_do_release()
813 kfree(cs); in cs_do_release()
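
The tail of cs_do_release() maps the CS end state onto its fence before signalling waiters and freeing the object; reconstructed from the matched lines (continuing from the locals in the fragments above), with interpretive comments:

        if (cs->timedout)
                cs->fence->error = -ETIMEDOUT;  /* TDR fired */
        else if (cs->aborted)
                cs->fence->error = -EIO;        /* killed by reset/rollback */
        else if (!cs->submitted)
                cs->fence->error = -EBUSY;      /* never reached the queue */

        if (cs->timestamp) {
                cs->fence->timestamp = cs->completion_timestamp;
                hl_push_cs_outcome(hdev, &cs->ctx->outcome_store, cs->sequence,
                                   cs->fence->timestamp, cs->fence->error);
        }

        hl_ctx_put(cs->ctx);

        complete_all(&cs->fence->completion);
        complete_multi_cs(hdev, cs);

        cs_release_sob_reset_handler(hdev, cs, hl_cs_cmpl);

        hl_fence_put(cs->fence);

        kfree(cs->jobs_in_queue_cnt);
        kfree(cs);
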
818 struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work); in cs_timedout() local
825 skip_reset_on_timeout = cs->skip_reset_on_timeout; in cs_timedout()
827 rc = cs_get_unless_zero(cs); in cs_timedout()
831 if ((!cs->submitted) || (cs->completed)) { in cs_timedout()
832 cs_put(cs); in cs_timedout()
836 hdev = cs->ctx->hdev; in cs_timedout()
845 cs->timedout = true; in cs_timedout()
852 hdev->captured_err_info.cs_timeout.seq = cs->sequence; in cs_timedout()
858 switch (cs->type) { in cs_timedout()
862 cs->sequence, timeout_sec); in cs_timedout()
868 cs->sequence, timeout_sec); in cs_timedout()
874 cs->sequence, timeout_sec); in cs_timedout()
880 cs->sequence, timeout_sec); in cs_timedout()
888 cs_put(cs); in cs_timedout()
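
cs_timedout() must tolerate racing against normal completion, hence the kref_get_unless_zero() guard before touching the CS. A hedged sketch of the entry path; the error capture, the per-type log messages and the reset decision are elided:

static void cs_timedout(struct work_struct *work)
{
        struct hl_cs *cs = container_of(work, struct hl_cs, work_tdr.work);
        struct hl_device *hdev;
        int rc;

        /* If the refcount already hit zero, release is in progress and
         * there is nothing left to time out.
         */
        rc = cs_get_unless_zero(cs);
        if (!rc)
                return;

        if (!cs->submitted || cs->completed) {
                cs_put(cs);
                return;
        }

        hdev = cs->ctx->hdev;
        cs->timedout = true;

        /* ... record cs->sequence in hdev->captured_err_info, log a
         * timeout message keyed on cs->type, drop the reference with
         * cs_put(), and trigger a device reset unless
         * cs->skip_reset_on_timeout is set (elided) ...
         */
}
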
905 struct hl_cs *cs; in allocate_cs() local
910 cs = kzalloc(sizeof(*cs), GFP_ATOMIC); in allocate_cs()
911 if (!cs) in allocate_cs()
912 cs = kzalloc(sizeof(*cs), GFP_KERNEL); in allocate_cs()
914 if (!cs) { in allocate_cs()
923 cs->ctx = ctx; in allocate_cs()
924 cs->submitted = false; in allocate_cs()
925 cs->completed = false; in allocate_cs()
926 cs->type = cs_type; in allocate_cs()
927 cs->timestamp = !!(flags & HL_CS_FLAGS_TIMESTAMP); in allocate_cs()
928 cs->encaps_signals = !!(flags & HL_CS_FLAGS_ENCAP_SIGNALS); in allocate_cs()
929 cs->timeout_jiffies = timeout; in allocate_cs()
930 cs->skip_reset_on_timeout = in allocate_cs()
933 cs->submission_time_jiffies = jiffies; in allocate_cs()
934 INIT_LIST_HEAD(&cs->job_list); in allocate_cs()
935 INIT_DELAYED_WORK(&cs->work_tdr, cs_timedout); in allocate_cs()
936 kref_init(&cs->refcount); in allocate_cs()
937 spin_lock_init(&cs->job_lock); in allocate_cs()
950 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
951 sizeof(*cs->jobs_in_queue_cnt), GFP_ATOMIC); in allocate_cs()
952 if (!cs->jobs_in_queue_cnt) in allocate_cs()
953 cs->jobs_in_queue_cnt = kcalloc(hdev->asic_prop.max_queues, in allocate_cs()
954 sizeof(*cs->jobs_in_queue_cnt), GFP_KERNEL); in allocate_cs()
956 if (!cs->jobs_in_queue_cnt) { in allocate_cs()
964 cs_cmpl->type = cs->type; in allocate_cs()
966 cs->fence = &cs_cmpl->base_fence; in allocate_cs()
998 cs->sequence = cs_cmpl->cs_seq; in allocate_cs()
1011 *cs_new = cs; in allocate_cs()
1017 kfree(cs->jobs_in_queue_cnt); in allocate_cs()
1021 kfree(cs); in allocate_cs()
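
allocate_cs() uses the same allocation idiom twice, for the CS object and for jobs_in_queue_cnt: try GFP_ATOMIC first, fall back to GFP_KERNEL. A minimal, self-contained sketch of that idiom; the helper name is hypothetical and the caller is assumed to be in process context so the sleepable retry is legal:

#include <linux/slab.h>

/* Hypothetical helper mirroring the fallback pattern above: the atomic
 * attempt avoids blocking on the fast path, while the GFP_KERNEL retry
 * may sleep and reclaim memory if the atomic pools are exhausted.
 */
static void *zalloc_atomic_fallback(size_t size)
{
        void *p = kzalloc(size, GFP_ATOMIC);

        if (!p)
                p = kzalloc(size, GFP_KERNEL);

        return p;
}
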
1026 static void cs_rollback(struct hl_device *hdev, struct hl_cs *cs) in cs_rollback() argument
1030 staged_cs_put(hdev, cs); in cs_rollback()
1032 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_rollback()
1067 struct hl_cs *cs, *tmp; in hl_cs_rollback_all() local
1082 list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) { in hl_cs_rollback_all()
1083 cs_get(cs); in hl_cs_rollback_all()
1084 cs->aborted = true; in hl_cs_rollback_all()
1086 cs->ctx->asid, cs->sequence); in hl_cs_rollback_all()
1087 cs_rollback(hdev, cs); in hl_cs_rollback_all()
1088 cs_put(cs); in hl_cs_rollback_all()
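
The rollback loop aborts everything still on the mirror list; reconstructed from the fragments, with the log helper and format string hedged (only its arguments are visible above):

        list_for_each_entry_safe(cs, tmp, &hdev->cs_mirror_list, mirror_node) {
                cs_get(cs);             /* keep the CS alive across rollback */
                cs->aborted = true;     /* waiters will see -EIO */
                dev_warn_ratelimited(hdev->dev, "Killing CS %d.%llu\n",
                                     cs->ctx->asid, cs->sequence);
                cs_rollback(hdev, cs);
                cs_put(cs);             /* possibly the final reference */
        }
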
1148 struct hl_cs *cs; in force_complete_cs() local
1152 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) { in force_complete_cs()
1153 cs->fence->error = -EIO; in force_complete_cs()
1154 complete_all(&cs->fence->completion); in force_complete_cs()
1170 struct hl_cs *cs = job->cs; in job_wq_completion() local
1171 struct hl_device *hdev = cs->ctx->hdev; in job_wq_completion()
1179 struct hl_cs *cs = container_of(work, struct hl_cs, finish_work); in cs_completion() local
1180 struct hl_device *hdev = cs->ctx->hdev; in cs_completion()
1183 list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node) in cs_completion()
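
cs_completion() is the finish_work handler queued by the ioctl paths below; its body is fully visible in the fragments and reconstructs to:

static void cs_completion(struct work_struct *work)
{
        struct hl_cs *cs = container_of(work, struct hl_cs, finish_work);
        struct hl_device *hdev = cs->ctx->hdev;
        struct hl_cs_job *job, *tmp;

        /* Retire every job; the per-job cs_put() calls inside
         * hl_complete_job() eventually release the CS itself.
         */
        list_for_each_entry_safe(job, tmp, &cs->job_list, cs_node)
                hl_complete_job(hdev, job);
}
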
1190 struct hl_cs *cs; in hl_get_active_cs_num() local
1194 list_for_each_entry(cs, &hdev->cs_mirror_list, mirror_node) in hl_get_active_cs_num()
1195 if (!cs->completed) in hl_get_active_cs_num()
1446 static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs, in cs_staged_submission() argument
1453 cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST); in cs_staged_submission()
1454 cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST); in cs_staged_submission()
1456 if (cs->staged_first) { in cs_staged_submission()
1458 INIT_LIST_HEAD(&cs->staged_cs_node); in cs_staged_submission()
1459 cs->staged_sequence = cs->sequence; in cs_staged_submission()
1461 if (cs->encaps_signals) in cs_staged_submission()
1462 cs->encaps_sig_hdl_id = encaps_signal_handle; in cs_staged_submission()
1467 cs->staged_sequence = sequence; in cs_staged_submission()
1471 staged_cs_get(hdev, cs); in cs_staged_submission()
1473 cs->staged_cs = true; in cs_staged_submission()
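
cs_staged_submission() turns the userspace flags into the staged-set bookkeeping consumed by all the predicates above. A reconstruction; the early-out for non-staged flags, the tail of the signature and the return value are assumptions:

static int cs_staged_submission(struct hl_device *hdev, struct hl_cs *cs,
                                u64 sequence, u32 flags,
                                u32 encaps_signal_handle)
{
        if (!(flags & HL_CS_FLAGS_STAGED_SUBMISSION))
                return 0;               /* assumed guard */

        cs->staged_last = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_LAST);
        cs->staged_first = !!(flags & HL_CS_FLAGS_STAGED_SUBMISSION_FIRST);

        if (cs->staged_first) {
                /* The first CS anchors the set and lends its sequence
                 * number to every later member.
                 */
                INIT_LIST_HEAD(&cs->staged_cs_node);
                cs->staged_sequence = cs->sequence;

                if (cs->encaps_signals)
                        cs->encaps_sig_hdl_id = encaps_signal_handle;
        } else {
                /* Later members inherit the set sequence from userspace */
                cs->staged_sequence = sequence;
        }

        /* Hold every non-last member until the whole set completes */
        staged_cs_get(hdev, cs);

        cs->staged_cs = true;

        return 0;
}
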
1500 struct hl_cs *cs; in cs_ioctl_default() local
1522 staged_mid ? user_sequence : ULLONG_MAX, &cs, flags, in cs_ioctl_default()
1527 *cs_seq = cs->sequence; in cs_ioctl_default()
1529 hl_debugfs_add_cs(cs); in cs_ioctl_default()
1531 rc = cs_staged_submission(hdev, cs, user_sequence, flags, in cs_ioctl_default()
1539 if (cs->staged_cs) in cs_ioctl_default()
1540 *cs_seq = cs->staged_sequence; in cs_ioctl_default()
1600 job->cs = cs; in cs_ioctl_default()
1605 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_default()
1606 cs->jobs_cnt++; in cs_ioctl_default()
1608 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_default()
1616 if (cs_needs_completion(cs) && in cs_ioctl_default()
1619 cs_get(cs); in cs_ioctl_default()
1629 cs->ctx->asid, cs->sequence, job->id, rc); in cs_ioctl_default()
1637 if (int_queues_only && cs_needs_completion(cs)) { in cs_ioctl_default()
1642 cs->ctx->asid, cs->sequence); in cs_ioctl_default()
1648 INIT_WORK(&cs->finish_work, cs_completion); in cs_ioctl_default()
1655 cs->fence->stream_master_qid_map = stream_master_qid_map; in cs_ioctl_default()
1657 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_default()
1662 cs->ctx->asid, cs->sequence, rc); in cs_ioctl_default()
1666 *signal_initial_sob_count = cs->initial_sob_count; in cs_ioctl_default()
1675 cs_rollback(hdev, cs); in cs_ioctl_default()
1680 cs_put(cs); in cs_ioctl_default()
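
Stitching the cs_ioctl_default() fragments together gives the happy path below. This is a condensed sketch only: chunk parsing, validation and error unwinding are elided, and job_needs_cs_ref() stands in for the real queue-type test:

        rc = allocate_cs(hdev, ctx, CS_TYPE_DEFAULT,
                         staged_mid ? user_sequence : ULLONG_MAX, &cs, flags,
                         timeout);
        *cs_seq = cs->sequence;
        hl_debugfs_add_cs(cs);

        rc = cs_staged_submission(hdev, cs, user_sequence, flags,
                                  encaps_signal_handle);
        if (cs->staged_cs)
                *cs_seq = cs->staged_sequence;  /* report the set sequence */

        /* Per parsed chunk: attach a job to the CS and account for it */
        job->cs = cs;
        cs->jobs_in_queue_cnt[job->hw_queue_id]++;
        cs->jobs_cnt++;
        list_add_tail(&job->cs_node, &cs->job_list);

        /* Each completing job pins the CS; hl_complete_job() unpins */
        if (cs_needs_completion(cs) &&
            job_needs_cs_ref(job))              /* hypothetical predicate */
                cs_get(cs);

        INIT_WORK(&cs->finish_work, cs_completion);
        rc = hl_hw_queue_schedule_cs(cs);
        if (!rc)
                *signal_initial_sob_count = cs->initial_sob_count;
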
1939 struct hl_ctx *ctx, struct hl_cs *cs, in cs_ioctl_signal_wait_create_jobs() argument
1957 if (cs->type == CS_TYPE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1971 job->cs = cs; in cs_ioctl_signal_wait_create_jobs()
1977 if ((cs->type == CS_TYPE_WAIT || cs->type == CS_TYPE_COLLECTIVE_WAIT) in cs_ioctl_signal_wait_create_jobs()
1978 && cs->encaps_signals) in cs_ioctl_signal_wait_create_jobs()
1991 cs_get(cs); in cs_ioctl_signal_wait_create_jobs()
1993 cs->jobs_in_queue_cnt[job->hw_queue_id]++; in cs_ioctl_signal_wait_create_jobs()
1994 cs->jobs_cnt++; in cs_ioctl_signal_wait_create_jobs()
1996 list_add_tail(&job->cs_node, &cs->job_list); in cs_ioctl_signal_wait_create_jobs()
2217 struct hl_cs *cs; in cs_ioctl_signal_wait() local
2377 rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout); in cs_ioctl_signal_wait()
2392 cs->signal_fence = sig_fence; in cs_ioctl_signal_wait()
2397 if (cs->encaps_signals) in cs_ioctl_signal_wait()
2398 cs->encaps_sig_hdl = encaps_sig_hdl; in cs_ioctl_signal_wait()
2401 hl_debugfs_add_cs(cs); in cs_ioctl_signal_wait()
2403 *cs_seq = cs->sequence; in cs_ioctl_signal_wait()
2406 rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type, in cs_ioctl_signal_wait()
2410 cs, q_idx, collective_engine_id, in cs_ioctl_signal_wait()
2422 INIT_WORK(&cs->finish_work, cs_completion); in cs_ioctl_signal_wait()
2424 rc = hl_hw_queue_schedule_cs(cs); in cs_ioctl_signal_wait()
2435 ctx->asid, cs->sequence, rc); in cs_ioctl_signal_wait()
2439 *signal_sob_addr_offset = cs->sob_addr_offset; in cs_ioctl_signal_wait()
2440 *signal_initial_sob_count = cs->initial_sob_count; in cs_ioctl_signal_wait()
2448 cs_rollback(hdev, cs); in cs_ioctl_signal_wait()
2453 cs_put(cs); in cs_ioctl_signal_wait()
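
The signal/wait path follows the same shape; a condensed sketch stitched from the fragments above, with the create-jobs argument list abbreviated and the collective-wait branch elided:

        rc = allocate_cs(hdev, ctx, cs_type, ULLONG_MAX, &cs, flags, timeout);

        /* A wait CS holds the fence of the signal CS it waits on */
        cs->signal_fence = sig_fence;
        if (cs->encaps_signals)
                cs->encaps_sig_hdl = encaps_sig_hdl;

        hl_debugfs_add_cs(cs);
        *cs_seq = cs->sequence;

        rc = cs_ioctl_signal_wait_create_jobs(hdev, ctx, cs, q_type,
                                              q_idx);   /* args abbreviated */

        INIT_WORK(&cs->finish_work, cs_completion);
        rc = hl_hw_queue_schedule_cs(cs);
        if (!rc) {
                *signal_sob_addr_offset = cs->sob_addr_offset;
                *signal_initial_sob_count = cs->initial_sob_count;
        }
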