
Searched refs:sched (Results 1 – 25 of 379) sorted by relevance

/linux-6.12.1/drivers/gpu/drm/scheduler/
sched_main.c
108 static u32 drm_sched_available_credits(struct drm_gpu_scheduler *sched) in drm_sched_available_credits() argument
112 drm_WARN_ON(sched, check_sub_overflow(sched->credit_limit, in drm_sched_available_credits()
113 atomic_read(&sched->credit_count), in drm_sched_available_credits()
127 static bool drm_sched_can_queue(struct drm_gpu_scheduler *sched, in drm_sched_can_queue() argument
136 if (sched->ops->update_job_credits) { in drm_sched_can_queue()
137 s_job->credits = sched->ops->update_job_credits(s_job); in drm_sched_can_queue()
139 drm_WARN(sched, !s_job->credits, in drm_sched_can_queue()
146 if (drm_WARN(sched, s_job->credits > sched->credit_limit, in drm_sched_can_queue()
148 s_job->credits = sched->credit_limit; in drm_sched_can_queue()
150 return drm_sched_available_credits(sched) >= s_job->credits; in drm_sched_can_queue()
[all …]
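
The credit check above boils down to: a job may be queued only when the scheduler's remaining credits cover the job's (possibly clamped) credit cost. Below is a minimal userspace model of that logic; the toy_* names stand in for the drm_sched structures and are illustrative only, not kernel code.

/* Simplified model of the credit check in drm_sched_can_queue() above. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_sched {
	uint32_t credit_limit;        /* total credits the ring can hold */
	atomic_uint credit_count;     /* credits consumed by in-flight jobs */
};

struct toy_job {
	uint32_t credits;             /* credits this job needs */
};

/* Mirrors drm_sched_available_credits(): limit minus what is in flight. */
static uint32_t toy_available_credits(struct toy_sched *s)
{
	return s->credit_limit - atomic_load(&s->credit_count);
}

/* Mirrors the tail of drm_sched_can_queue(): clamp oversized jobs, compare. */
static bool toy_can_queue(struct toy_sched *s, struct toy_job *job)
{
	if (job->credits > s->credit_limit)
		job->credits = s->credit_limit;   /* kernel warns and clamps here */
	return toy_available_credits(s) >= job->credits;
}

int main(void)
{
	struct toy_sched s = { .credit_limit = 4 };
	struct toy_job j = { .credits = 2 };

	atomic_store(&s.credit_count, 3);
	printf("can queue: %d\n", toy_can_queue(&s, &j)); /* 0: only 1 credit left */
	return 0;
}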
sched_entity.c
194 job->sched->ops->free_job(job); in drm_sched_entity_kill_jobs_work()
285 struct drm_gpu_scheduler *sched; in drm_sched_entity_flush() local
292 sched = entity->rq->sched; in drm_sched_entity_flush()
300 sched->job_scheduled, in drm_sched_entity_flush()
304 wait_event_killable(sched->job_scheduled, in drm_sched_entity_flush()
385 drm_sched_wakeup(entity->rq->sched); in drm_sched_entity_wakeup()
411 struct drm_gpu_scheduler *sched = entity->rq->sched; in drm_sched_entity_add_dependency_cb() local
427 if (!fence->error && s_fence && s_fence->sched == sched && in drm_sched_entity_add_dependency_cb()
470 if (job->sched->ops->prepare_job) in drm_sched_job_dependency()
471 return job->sched->ops->prepare_job(job, entity); in drm_sched_job_dependency()
[all …]
/linux-6.12.1/drivers/gpu/drm/xe/
xe_gpu_scheduler.c
8 static void xe_sched_process_msg_queue(struct xe_gpu_scheduler *sched) in xe_sched_process_msg_queue() argument
10 if (!READ_ONCE(sched->base.pause_submit)) in xe_sched_process_msg_queue()
11 queue_work(sched->base.submit_wq, &sched->work_process_msg); in xe_sched_process_msg_queue()
14 static void xe_sched_process_msg_queue_if_ready(struct xe_gpu_scheduler *sched) in xe_sched_process_msg_queue_if_ready() argument
18 xe_sched_msg_lock(sched); in xe_sched_process_msg_queue_if_ready()
19 msg = list_first_entry_or_null(&sched->msgs, struct xe_sched_msg, link); in xe_sched_process_msg_queue_if_ready()
21 xe_sched_process_msg_queue(sched); in xe_sched_process_msg_queue_if_ready()
22 xe_sched_msg_unlock(sched); in xe_sched_process_msg_queue_if_ready()
26 xe_sched_get_msg(struct xe_gpu_scheduler *sched) in xe_sched_get_msg() argument
30 xe_sched_msg_lock(sched); in xe_sched_get_msg()
[all …]
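
The xe helpers above follow a small guarded-kick pattern: peek the message list under the scheduler lock and queue the worker only when a message exists and submission is not paused. A rough userspace sketch of that pattern follows; the toy_* types and the work counter are illustrative stand-ins, not the xe structures.

/* Userspace model of xe_sched_process_msg_queue_if_ready() above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_msg { struct toy_msg *next; int opcode; };

struct toy_sched {
	pthread_mutex_t lock;
	struct toy_msg *msgs;      /* pending message list */
	bool pause_submit;
	int work_kicked;           /* stands in for queue_work() */
};

static void toy_process_msg_queue(struct toy_sched *s)
{
	if (!s->pause_submit)
		s->work_kicked++;      /* kernel queues work_process_msg here */
}

static void toy_process_msg_queue_if_ready(struct toy_sched *s)
{
	pthread_mutex_lock(&s->lock);
	if (s->msgs)               /* list_first_entry_or_null() equivalent */
		toy_process_msg_queue(s);
	pthread_mutex_unlock(&s->lock);
}

int main(void)
{
	struct toy_msg m = { NULL, 1 };
	struct toy_sched s = { .lock = PTHREAD_MUTEX_INITIALIZER, .msgs = &m };

	toy_process_msg_queue_if_ready(&s);
	printf("work kicked %d time(s)\n", s.work_kicked);
	return 0;
}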
xe_gpu_scheduler.h
12 int xe_sched_init(struct xe_gpu_scheduler *sched,
20 void xe_sched_fini(struct xe_gpu_scheduler *sched);
22 void xe_sched_submission_start(struct xe_gpu_scheduler *sched);
23 void xe_sched_submission_stop(struct xe_gpu_scheduler *sched);
25 void xe_sched_submission_resume_tdr(struct xe_gpu_scheduler *sched);
27 void xe_sched_add_msg(struct xe_gpu_scheduler *sched,
29 void xe_sched_add_msg_locked(struct xe_gpu_scheduler *sched,
32 static inline void xe_sched_msg_lock(struct xe_gpu_scheduler *sched) in xe_sched_msg_lock() argument
34 spin_lock(&sched->base.job_list_lock); in xe_sched_msg_lock()
37 static inline void xe_sched_msg_unlock(struct xe_gpu_scheduler *sched) in xe_sched_msg_unlock() argument
[all …]
/linux-6.12.1/tools/perf/
builtin-sched.c
130 int (*switch_event)(struct perf_sched *sched, struct evsel *evsel,
133 int (*runtime_event)(struct perf_sched *sched, struct evsel *evsel,
136 int (*wakeup_event)(struct perf_sched *sched, struct evsel *evsel,
140 int (*fork_event)(struct perf_sched *sched, union perf_event *event,
143 int (*migrate_task_event)(struct perf_sched *sched,
298 static void burn_nsecs(struct perf_sched *sched, u64 nsecs) in burn_nsecs() argument
304 } while (T1 + sched->run_measurement_overhead < T0 + nsecs); in burn_nsecs()
317 static void calibrate_run_measurement_overhead(struct perf_sched *sched) in calibrate_run_measurement_overhead() argument
324 burn_nsecs(sched, 0); in calibrate_run_measurement_overhead()
329 sched->run_measurement_overhead = min_delta; in calibrate_run_measurement_overhead()
[all …]
/linux-6.12.1/net/mptcp/
sched.c
42 struct mptcp_sched_ops *sched, *ret = NULL; in mptcp_sched_find() local
44 list_for_each_entry_rcu(sched, &mptcp_sched_list, list) { in mptcp_sched_find()
45 if (!strcmp(sched->name, name)) { in mptcp_sched_find()
46 ret = sched; in mptcp_sched_find()
59 struct mptcp_sched_ops *sched; in mptcp_get_available_schedulers() local
64 list_for_each_entry_rcu(sched, &mptcp_sched_list, list) { in mptcp_get_available_schedulers()
67 offs == 0 ? "" : " ", sched->name); in mptcp_get_available_schedulers()
76 int mptcp_register_scheduler(struct mptcp_sched_ops *sched) in mptcp_register_scheduler() argument
78 if (!sched->get_subflow) in mptcp_register_scheduler()
82 if (mptcp_sched_find(sched->name)) { in mptcp_register_scheduler()
[all …]
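
mptcp_sched_find() above is a straightforward lookup by name over the list of registered schedulers. A plain-C sketch of the same lookup pattern follows; it uses an ordinary linked list instead of the kernel's RCU-protected list, and the entries and names are illustrative.

/* Userspace sketch of the by-name lookup pattern in mptcp_sched_find(). */
#include <stdio.h>
#include <string.h>

struct toy_sched_ops {
	const char *name;
	struct toy_sched_ops *next;
};

static struct toy_sched_ops rr  = { "roundrobin", NULL };
static struct toy_sched_ops def = { "default", &rr };
static struct toy_sched_ops *sched_list = &def;

static struct toy_sched_ops *toy_sched_find(const char *name)
{
	/* Walk the registry and return the first entry whose name matches. */
	for (struct toy_sched_ops *s = sched_list; s; s = s->next)
		if (!strcmp(s->name, name))
			return s;
	return NULL;
}

int main(void)
{
	printf("found roundrobin: %s\n",
	       toy_sched_find("roundrobin") ? "yes" : "no");
	return 0;
}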
/linux-6.12.1/tools/testing/selftests/ftrace/test.d/trigger/
trigger-filter.tc
14 echo 'traceoff if child_pid == 0' > events/sched/sched_process_fork/trigger
23 ! echo 'traceoff if a' > events/sched/sched_process_fork/trigger
24 ! echo 'traceoff if common_pid=0' > events/sched/sched_process_fork/trigger
25 ! echo 'traceoff if common_pid==b' > events/sched/sched_process_fork/trigger
26 echo 'traceoff if common_pid == 0' > events/sched/sched_process_fork/trigger
27 echo '!traceoff' > events/sched/sched_process_fork/trigger
28 ! echo 'traceoff if common_pid == child_pid' > events/sched/sched_process_fork/trigger
29 echo 'traceoff if common_pid <= 0' > events/sched/sched_process_fork/trigger
30 echo '!traceoff' > events/sched/sched_process_fork/trigger
31 echo 'traceoff if common_pid >= 0' > events/sched/sched_process_fork/trigger
[all …]
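
The selftest lines above drive the sched_process_fork trigger file directly with strings such as 'traceoff if common_pid == 0'. Below is a minimal C sketch that installs the same trigger; it assumes tracefs is mounted at /sys/kernel/tracing (older setups use /sys/kernel/debug/tracing) and that the program runs as root.

/* Install one of the filtered triggers exercised by trigger-filter.tc. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *path =
		"/sys/kernel/tracing/events/sched/sched_process_fork/trigger";
	const char *trigger = "traceoff if common_pid == 0\n";

	int fd = open(path, O_WRONLY);
	if (fd < 0) {
		perror("open");             /* needs root and a mounted tracefs */
		return 1;
	}
	if (write(fd, trigger, strlen(trigger)) < 0)
		perror("write");            /* malformed triggers fail with EINVAL */
	close(fd);
	return 0;
}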
trigger-eventonoff.tc
12 FEATURE=`grep enable_event events/sched/sched_process_fork/trigger`
19 echo 0 > events/sched/sched_switch/enable
20 echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
22 if [ `cat events/sched/sched_switch/enable` != '1*' ]; then
29 echo 1 > events/sched/sched_switch/enable
30 echo 'disable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
32 if [ `cat events/sched/sched_switch/enable` != '0*' ]; then
39 ! echo 'enable_event:nogroup:noevent' > events/sched/sched_process_fork/trigger
40 ! echo 'disable_event+1' > events/sched/sched_process_fork/trigger
41 echo 'enable_event:sched:sched_switch' > events/sched/sched_process_fork/trigger
[all …]
trigger-multihist.tc
14 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
15 echo 'hist:keys=parent_comm:vals=child_pid' >> events/sched/sched_process_fork/trigger
17 grep parent_pid events/sched/sched_process_fork/hist > /dev/null || \
19 grep child events/sched/sched_process_fork/hist > /dev/null || \
22 grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
29 echo 'hist:name=test_hist:keys=common_pid' > events/sched/sched_process_fork/trigger
31 grep test_hist events/sched/sched_process_fork/hist > /dev/null || \
36 echo 'hist:name=test_hist:keys=common_pid' > events/sched/sched_process_exit/trigger
38 grep test_hist events/sched/sched_process_exit/hist > /dev/null || \
41 diffs=`diff events/sched/sched_process_exit/hist events/sched/sched_process_fork/hist | wc -l`
trigger-traceonoff.tc
13 echo 'traceoff' > events/sched/sched_process_fork/trigger
23 echo 'traceon' > events/sched/sched_process_fork/trigger
32 ! echo 'traceoff:badparam' > events/sched/sched_process_fork/trigger
33 ! echo 'traceoff+0' > events/sched/sched_process_fork/trigger
34 echo 'traceon' > events/sched/sched_process_fork/trigger
35 ! echo 'traceon' > events/sched/sched_process_fork/trigger
36 ! echo 'traceoff' > events/sched/sched_process_fork/trigger
trigger-hist.tc
14 echo 'hist:keys=parent_pid:vals=child_pid' > events/sched/sched_process_fork/trigger
16 grep parent_pid events/sched/sched_process_fork/hist > /dev/null || \
18 grep child events/sched/sched_process_fork/hist > /dev/null || \
25 echo 'hist:keys=parent_pid,child_pid' > events/sched/sched_process_fork/trigger
27 grep '^{ parent_pid:.*, child_pid:.*}' events/sched/sched_process_fork/hist > /dev/null || \
34 echo 'hist:keys=parent_comm' > events/sched/sched_process_fork/trigger
37 grep "parent_comm: $COMM" events/sched/sched_process_fork/hist > /dev/null || \
62 echo 'hist:keys=parent_pid,child_pid:sort=child_pid.ascending' > events/sched/sched_process_fork/tr…
73 events/sched/sched_process_fork/hist | cut -d: -f2 ` ||
trigger-snapshot.tc
11 FEATURE=`grep snapshot events/sched/sched_process_fork/trigger`
19 echo 1 > events/sched/sched_process_fork/enable
21 echo 'snapshot:1' > events/sched/sched_process_fork/trigger
28 echo 0 > events/sched/sched_process_fork/enable
32 ! echo "snapshot+1" > events/sched/sched_process_fork/trigger
33 echo "snapshot" > events/sched/sched_process_fork/trigger
34 ! echo "snapshot" > events/sched/sched_process_fork/trigger
trigger-stacktrace.tc
11 FEATURE=`grep stacktrace events/sched/sched_process_fork/trigger`
20 echo 'stacktrace' > events/sched/sched_process_fork/trigger
29 ! echo "stacktrace:foo" > events/sched/sched_process_fork/trigger
30 echo "stacktrace" > events/sched/sched_process_fork/trigger
31 ! echo "stacktrace" > events/sched/sched_process_fork/trigger
/linux-6.12.1/Documentation/scheduler/
index.rst
11 sched-arch
12 sched-bwc
13 sched-deadline
14 sched-design-CFS
15 sched-eevdf
16 sched-domains
17 sched-capacity
18 sched-energy
20 sched-util-clamp
21 sched-nice-design
[all …]
/linux-6.12.1/drivers/gpu/drm/nouveau/
nouveau_sched.c
29 struct nouveau_sched *sched = args->sched; in nouveau_job_init() local
36 job->sched = sched; in nouveau_job_init()
89 ret = drm_sched_job_init(&job->base, &sched->entity, in nouveau_job_init()
121 struct nouveau_sched *sched = job->sched; in nouveau_job_done() local
123 spin_lock(&sched->job.list.lock); in nouveau_job_done()
125 spin_unlock(&sched->job.list.lock); in nouveau_job_done()
127 wake_up(&sched->job.wq); in nouveau_job_done()
276 struct nouveau_sched *sched = job->sched; in nouveau_job_submit() local
296 mutex_lock(&sched->mutex); in nouveau_job_submit()
308 spin_lock(&sched->job.list.lock); in nouveau_job_submit()
[all …]
/linux-6.12.1/drivers/gpu/drm/panthor/
panthor_sched.c
689 #define sched_queue_work(sched, wname) \ argument
691 if (!atomic_read(&(sched)->reset.in_progress) && \
692 !panthor_device_reset_is_pending((sched)->ptdev)) \
693 queue_work((sched)->wq, &(sched)->wname ## _work); \
705 #define sched_queue_delayed_work(sched, wname, delay) \ argument
707 if (!atomic_read(&sched->reset.in_progress) && \
708 !panthor_device_reset_is_pending((sched)->ptdev)) \
709 mod_delayed_work((sched)->wq, &(sched)->wname ## _work, delay); \
1287 struct panthor_scheduler *sched = ptdev->scheduler; in cs_slot_process_fatal_event_locked() local
1288 struct panthor_csg_slot *csg_slot = &sched->csg_slots[csg_id]; in cs_slot_process_fatal_event_locked()
[all …]
/linux-6.12.1/drivers/slimbus/
sched.c
29 struct slim_sched *sched = &ctrl->sched; in slim_ctrl_clk_pause() local
38 mutex_lock(&sched->m_reconf); in slim_ctrl_clk_pause()
40 if (sched->clk_state == SLIM_CLK_ACTIVE) { in slim_ctrl_clk_pause()
41 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
49 ret = wait_for_completion_timeout(&sched->pause_comp, in slim_ctrl_clk_pause()
52 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
63 if (sched->clk_state == SLIM_CLK_PAUSED && ctrl->wakeup) in slim_ctrl_clk_pause()
66 sched->clk_state = SLIM_CLK_ACTIVE; in slim_ctrl_clk_pause()
67 mutex_unlock(&sched->m_reconf); in slim_ctrl_clk_pause()
73 if (ctrl->sched.clk_state == SLIM_CLK_PAUSED) { in slim_ctrl_clk_pause()
[all …]
/linux-6.12.1/net/netfilter/ipvs/
ip_vs_sched.c
61 struct ip_vs_scheduler *sched) in ip_vs_unbind_scheduler() argument
70 if (sched->done_service) in ip_vs_unbind_scheduler()
71 sched->done_service(svc); in ip_vs_unbind_scheduler()
81 struct ip_vs_scheduler *sched; in ip_vs_sched_getbyname() local
87 list_for_each_entry(sched, &ip_vs_schedulers, n_list) { in ip_vs_sched_getbyname()
91 if (sched->module && !try_module_get(sched->module)) { in ip_vs_sched_getbyname()
97 if (strcmp(sched_name, sched->name)==0) { in ip_vs_sched_getbyname()
100 return sched; in ip_vs_sched_getbyname()
102 module_put(sched->module); in ip_vs_sched_getbyname()
115 struct ip_vs_scheduler *sched; in ip_vs_scheduler_get() local
[all …]
/linux-6.12.1/Documentation/translations/zh_CN/scheduler/
index.rst
22 sched-arch
23 sched-bwc
24 sched-design-CFS
25 sched-domains
26 sched-capacity
27 sched-energy
29 sched-nice-design
30 sched-stats
31 sched-debug
35 sched-deadline
[all …]
/linux-6.12.1/crypto/
fcrypt.c
54 __be32 sched[ROUNDS]; member
223 #define F_ENCRYPT(R, L, sched) \ argument
226 u.l = sched ^ R; \
242 F_ENCRYPT(X.r, X.l, ctx->sched[0x0]); in fcrypt_encrypt()
243 F_ENCRYPT(X.l, X.r, ctx->sched[0x1]); in fcrypt_encrypt()
244 F_ENCRYPT(X.r, X.l, ctx->sched[0x2]); in fcrypt_encrypt()
245 F_ENCRYPT(X.l, X.r, ctx->sched[0x3]); in fcrypt_encrypt()
246 F_ENCRYPT(X.r, X.l, ctx->sched[0x4]); in fcrypt_encrypt()
247 F_ENCRYPT(X.l, X.r, ctx->sched[0x5]); in fcrypt_encrypt()
248 F_ENCRYPT(X.r, X.l, ctx->sched[0x6]); in fcrypt_encrypt()
[all …]
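
fcrypt_encrypt() above applies F_ENCRYPT() sixteen times, alternating which half of the block is updated with the next subkey from ctx->sched[]. The toy illustration below shows that alternating Feistel-style structure only; the round function and key schedule are placeholders and are not fcrypt's.

/* Toy model of the alternating half-block rounds seen in fcrypt_encrypt(). */
#include <stdint.h>
#include <stdio.h>

#define ROUNDS 16

static uint32_t toy_round(uint32_t half, uint32_t subkey)
{
	uint32_t v = half ^ subkey;       /* fcrypt also starts with sched ^ R */
	return (v << 5) | (v >> 27);      /* placeholder mixing step */
}

int main(void)
{
	uint32_t sched[ROUNDS], l = 0x01234567, r = 0x89abcdef;

	for (int i = 0; i < ROUNDS; i++)
		sched[i] = 0x9e3779b9u * (i + 1);   /* made-up key schedule */

	/* Rounds alternate which half is updated, as in the F_ENCRYPT calls. */
	for (int i = 0; i < ROUNDS; i += 2) {
		l ^= toy_round(r, sched[i]);        /* F_ENCRYPT(X.r, X.l, sched[i])   */
		r ^= toy_round(l, sched[i + 1]);    /* F_ENCRYPT(X.l, X.r, sched[i+1]) */
	}
	printf("%08x %08x\n", l, r);
	return 0;
}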
/linux-6.12.1/drivers/net/wireless/ath/ath9k/
channel.c
261 if (likely(sc->sched.channel_switch_time)) in ath_chanctx_check_active()
263 usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
311 ictx->flush_timeout = usecs_to_jiffies(sc->sched.channel_switch_time); in ath_chanctx_check_active()
382 mod_timer(&sc->sched.timer, jiffies + timeout); in ath_chanctx_setup_timer()
399 if (ctx->active && sc->sched.extend_absence) { in ath_chanctx_handle_bmiss()
401 sc->sched.extend_absence = false; in ath_chanctx_handle_bmiss()
408 if (ctx->active && sc->sched.beacon_miss >= 2) { in ath_chanctx_handle_bmiss()
410 sc->sched.extend_absence = true; in ath_chanctx_handle_bmiss()
423 avp->offchannel_duration = sc->sched.offchannel_duration; in ath_chanctx_offchannel_noa()
451 if (sc->sched.extend_absence) in ath_chanctx_set_periodic_noa()
[all …]
/linux-6.12.1/net/sctp/
stream_sched.c
116 void sctp_sched_ops_register(enum sctp_sched_type sched, in sctp_sched_ops_register() argument
119 sctp_sched_ops[sched] = sched_ops; in sctp_sched_ops_register()
133 struct sctp_sched_ops *sched = sctp_sched_ops_from_stream(stream); in sctp_sched_free_sched() local
137 sched->unsched_all(stream); in sctp_sched_free_sched()
142 sched->free_sid(stream, i); in sctp_sched_free_sched()
149 enum sctp_sched_type sched) in sctp_sched_set_sched() argument
151 struct sctp_sched_ops *old = asoc->outqueue.sched; in sctp_sched_set_sched()
157 if (sched > SCTP_SS_MAX) in sctp_sched_set_sched()
160 n = sctp_sched_ops[sched]; in sctp_sched_set_sched()
167 asoc->outqueue.sched = n; in sctp_sched_set_sched()
[all …]
/linux-6.12.1/include/drm/
gpu_scheduler.h
258 struct drm_gpu_scheduler *sched; member
302 struct drm_gpu_scheduler *sched; member
340 struct drm_gpu_scheduler *sched; member
545 int drm_sched_init(struct drm_gpu_scheduler *sched,
552 void drm_sched_fini(struct drm_gpu_scheduler *sched);
575 void drm_sched_tdr_queue_imm(struct drm_gpu_scheduler *sched);
577 void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
578 bool drm_sched_wqueue_ready(struct drm_gpu_scheduler *sched);
579 void drm_sched_wqueue_stop(struct drm_gpu_scheduler *sched);
580 void drm_sched_wqueue_start(struct drm_gpu_scheduler *sched);
[all …]
/linux-6.12.1/tools/perf/Documentation/
perf-sched.txt
1 perf-sched(1)
6 perf-sched - Tool to trace/measure scheduler properties (latencies)
11 'perf sched' {record|latency|map|replay|script|timehist}
15 There are several variants of 'perf sched':
17 'perf sched record <command>' to record the scheduling events
20 'perf sched latency' to report the per task scheduling latencies
24 perf sched record -- sleep 1
25 perf sched latency
43 'perf sched script' to see a detailed trace of the workload that
46 'perf sched replay' to simulate the workload that was recorded
[all …]
/linux-6.12.1/tools/perf/tests/shell/
lock_contention.sh
47 perf lock record -o ${perfdata} -- perf bench sched messaging > /dev/null 2>&1
67 perf lock con -a -b -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
78 perf lock record -o- -- perf bench sched messaging 2> /dev/null | \
102 perf lock con -a -b -t -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
125 perf lock con -a -b -l -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
143 perf lock con -a -b -g -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
165 perf lock con -a -b -Y spinlock -q -- perf bench sched messaging > /dev/null 2> ${result}
197 perf lock con -a -b -L tasklist_lock -q -- perf bench sched messaging > /dev/null 2> ${result}
225 perf lock con -a -b -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${result}
253 …perf lock con -a -b -t -S unix_stream -E 1 -q -- perf bench sched messaging > /dev/null 2> ${resul…
[all …]
