Home
last modified time | relevance | path

Searched full:scheduled (Results 1 – 25 of 949) sorted by relevance

12345678910>>...38

/linux-6.12.1/include/drm/
Dgpu_scheduler.h102 * Runqueue on which this entity is currently scheduled.
114 * be scheduled on any scheduler on this list.
170 * &drm_sched_fence.scheduled uses the fence_context but
199 * Points to the finished fence of the last scheduled job. Only written
244 * struct drm_sched_rq - queue of entities to be scheduled.
248 * @entities: list of the entities to be scheduled.
249 * @current_entity: the entity which is to be scheduled.
269 * @scheduled: this fence is what will be signaled by the scheduler
270 * when the job is scheduled.
272 struct dma_fence scheduled; member
[all …]
/linux-6.12.1/drivers/md/dm-vdo/
Daction-manager.c237 * If the manager is not operating normally, the action will not be scheduled.
239 * Return: true if an action was scheduled.
283 * @parent: The object to notify once the action is complete or if the action can not be scheduled;
287 * action completes. If there is already a pending action, this action will not be scheduled, and,
291 * Return: true if the action was scheduled.
310 * @parent: The object to notify once the action is complete or if the action can not be scheduled;
315 * scheduled, and, if it has a parent, that parent will be notified. At least one of the preamble,
318 * Return: true if the action was scheduled.
341 * @parent: The object to notify once the action is complete or if the action can not be scheduled;
346 * scheduled, and, if it has a parent, that parent will be notified. At least one of the preamble,
[all …]
/linux-6.12.1/drivers/gpu/drm/scheduler/
Dsched_fence.c66 /* Set the parent before signaling the scheduled fence, such that, in drm_sched_fence_scheduled()
68 * been scheduled (which is the case for drivers delegating waits in drm_sched_fence_scheduled()
75 dma_fence_signal(&fence->scheduled); in drm_sched_fence_scheduled()
141 * Drop the extra reference from the scheduled fence to the base fence.
147 dma_fence_put(&fence->scheduled); in drm_sched_fence_release_finished()
197 return container_of(f, struct drm_sched_fence, scheduled); in to_drm_sched_fence()
228 dma_fence_init(&fence->scheduled, &drm_sched_fence_ops_scheduled, in drm_sched_fence_init()
Dsched_entity.c168 * drm_sched_entity_error - return error of last scheduled job
171 * Opportunistically return the error of the last scheduled job. Result can
211 if (s_fence && f == &s_fence->scheduled) { in drm_sched_entity_kill_jobs_cb()
212 /* The dependencies array had a reference on the scheduled in drm_sched_entity_kill_jobs_cb()
221 * had on the scheduled fence. in drm_sched_entity_kill_jobs_cb()
223 dma_fence_put(&s_fence->scheduled); in drm_sched_entity_kill_jobs_cb()
418 * Fence is a scheduled/finished fence from a job in drm_sched_entity_add_dependency_cb()
432 * it to be scheduled in drm_sched_entity_add_dependency_cb()
434 fence = dma_fence_get(&s_fence->scheduled); in drm_sched_entity_add_dependency_cb()
441 /* Ignore it when it is already scheduled */ in drm_sched_entity_add_dependency_cb()
/linux-6.12.1/drivers/net/wireless/broadcom/brcm80211/brcmfmac/
Dpno.h17 * brcmf_pno_start_sched_scan - initiate scheduled scan on device.
20 * @req: configuration parameters for scheduled scan.
26 * brcmf_pno_stop_sched_scan - terminate scheduled scan on device.
34 * brcmf_pno_wiphy_params - fill scheduled scan parameters in wiphy instance.
/linux-6.12.1/include/linux/
Dposix-timers_types.h64 * @work: The task work to be scheduled
66 * @scheduled: @work has been scheduled already, no further processing
71 unsigned int scheduled; member
/linux-6.12.1/Documentation/arch/powerpc/
Dpmu-ebb.rst44 user process. This means once an EBB event is scheduled on the PMU, no non-EBB
56 first will be scheduled and the other will be put in error state. See the
84 userspace is able to reliably determine which PMC the event is scheduled on.
95 guarantee that it has been scheduled on the PMU. To ensure that the EBB event
96 has been scheduled on the PMU, you must perform a read() on the event. If the
97 read() returns EOF, then the event has not been scheduled and EBBs are not
/linux-6.12.1/net/mptcp/
Dsched.c141 bool scheduled) in mptcp_subflow_set_scheduled() argument
143 WRITE_ONCE(subflow->scheduled, scheduled); in mptcp_subflow_set_scheduled()
165 if (READ_ONCE(subflow->scheduled)) in mptcp_sched_get_send()
187 if (READ_ONCE(subflow->scheduled)) in mptcp_sched_get_retrans()
/linux-6.12.1/net/sctp/
Dstream_sched_prio.c64 /* Look into scheduled priorities first, as they are sorted and in sctp_sched_prio_get_head()
65 * we can find it fast IF it's scheduled. in sctp_sched_prio_get_head()
105 bool scheduled = false; in sctp_sched_prio_unsched() local
110 /* Scheduled */ in sctp_sched_prio_unsched()
111 scheduled = true; in sctp_sched_prio_unsched()
127 return scheduled; in sctp_sched_prio_unsched()
137 /* Nothing to do if already scheduled */ in sctp_sched_prio_sched()
/linux-6.12.1/drivers/gpu/drm/
Ddrm_vblank_work.c102 * If @work is already scheduled, this function will reschedule said work
106 * %1 if @work was successfully (re)scheduled, %0 if it was either already
107 * scheduled or cancelled, or a negative error code on failure.
134 /* Already scheduled w/ same vbl count */ in drm_vblank_work_schedule()
175 * Cancel an already scheduled vblank work and wait for its
178 * On return, @work is guaranteed to no longer be scheduled or running, even
215 * drm_vblank_work_flush - wait for a scheduled vblank work to finish
/linux-6.12.1/drivers/net/wireless/intel/iwlwifi/fw/api/
Dtime-event.h50 * the first fragment is scheduled.
52 * the first 2 fragments are scheduled.
58 * scheduled.
116 * the first fragment is scheduled.
118 * the first 2 fragments are scheduled.
124 * scheduled.
253 * @status: true if scheduled, false otherwise (not executed)
446 * Note: the session protection will always be scheduled to start as
/linux-6.12.1/drivers/usb/host/
Dxhci-mtk.h71 * (@repeat==1) scheduled within the interval
82 * scheduled first time within the interval
84 * scheduled within an interval. In the simple algorithm, only
88 * @pkts: number of packets to be transferred in the scheduled uframes
/linux-6.12.1/drivers/infiniband/sw/rxe/
Drxe_task.c25 /* Check if task is idle i.e. not running, not scheduled in
54 * scheduled in the work queue. This routine is
170 "%ld tasks scheduled, %ld tasks done\n", in do_task()
226 /* now the task cannot be scheduled or run just wait in rxe_cleanup_task()
227 * for the previously scheduled tasks to finish. in rxe_cleanup_task()
/linux-6.12.1/drivers/soc/fsl/dpio/
Dqbman-portal.h398 * qbman_swp_fq_schedule() - Move the fq to the scheduled state
400 * @fqid: the index of frame queue to be scheduled
413 * qbman_swp_fq_force() - Force the FQ to fully scheduled state
417 * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled
450 * XOFF FQs will remain in the tentatively-scheduled state, even when
451 * non-empty, meaning they won't be selected for scheduled dequeuing.
452 * If a FQ is changed to XOFF after it had already become truly-scheduled
/linux-6.12.1/drivers/staging/media/deprecated/atmel/
DKconfig21 This driver is deprecated and is scheduled for removal by
40 This driver is deprecated and is scheduled for removal by
/linux-6.12.1/drivers/gpu/drm/i915/
Di915_priolist_types.h23 /* Interactive workload, scheduled for immediate pageflipping */
32 * another context. They get scheduled with their default priority and
Di915_vma_resource.h60 * is not considered published until unbind is scheduled, and as such it
61 * is illegal to access this fence before scheduled unbind other than
66 * is scheduled.
92 * taken when the unbind is scheduled.
/linux-6.12.1/arch/alpha/kernel/
Dperf_event.c36 /* Number of events scheduled; also number entries valid in arrays below. */
40 /* Events currently scheduled. */
42 /* Event type of each scheduled event. */
44 /* Current index of each scheduled event; if not yet determined
149 * Check that a group of events can be simultaneously scheduled on to the
369 * Check that a group of events can be simultaneously scheduled on to the PMU.
387 * If new events have been scheduled then update cpuc with the new
637 * scheduled on to the PMU. At that point the code to programme the in __hw_perf_event_init()
646 * be scheduled on to the PMU. in __hw_perf_event_init()
727 /* Update cpuc with information from any new scheduled events. */ in alpha_pmu_enable()
/linux-6.12.1/Documentation/scheduler/
Dsched-ext.rst50 treated as ``SCHED_NORMAL`` and scheduled by CFS until the BPF scheduler is
55 ``SCHED_EXT`` tasks are scheduled by sched_ext.
58 set in ``ops->flags``, only tasks with the ``SCHED_EXT`` policy are scheduled
60 ``SCHED_IDLE`` policies are scheduled by CFS.
217 The following briefly shows how a waking task is scheduled and executed.
/linux-6.12.1/kernel/
Dasync.c28 The async core will assign each scheduled event such a sequence cookie and
35 operations that were scheduled prior to the operation corresponding with the
38 Subsystem/driver initialization code that scheduled asynchronous probe
256 * If the asynchronous execution of @func is scheduled successfully, return
Dworkqueue_internal.h42 struct list_head scheduled; /* L: scheduled works */ member
/linux-6.12.1/drivers/gpu/drm/amd/amdgpu/
Damdgpu_job.c290 struct dma_fence *fence = &leader->base.s_fence->scheduled; in amdgpu_job_set_gang_leader()
310 if (job->gang_submit != &job->base.s_fence->scheduled) in amdgpu_job_free()
423 /* Signal all jobs not yet scheduled */ in amdgpu_job_stop_all_jobs_on_sched()
431 dma_fence_signal(&s_fence->scheduled); in amdgpu_job_stop_all_jobs_on_sched()
439 /* Signal all jobs already scheduled to HW */ in amdgpu_job_stop_all_jobs_on_sched()
/linux-6.12.1/arch/s390/pci/
Dpci_irq.c179 atomic_t scheduled; member
185 atomic_t *scheduled = data; in zpci_handle_remote_irq() local
189 } while (atomic_dec_return(scheduled)); in zpci_handle_remote_irq()
212 if (atomic_inc_return(&cpu_data->scheduled) > 1) in zpci_handle_fallback_irq()
215 INIT_CSD(&cpu_data->csd, zpci_handle_remote_irq, &cpu_data->scheduled); in zpci_handle_fallback_irq()
/linux-6.12.1/Documentation/admin-guide/hw-vuln/
Dcore-scheduling.rst35 Using this feature, userspace defines groups of tasks that can be co-scheduled
122 tasks selected to be scheduled are of the highest priority in a core. However,
128 by the scheduler (idle thread is scheduled to run).
224 - Gang scheduling: Requirements for a group of tasks that needs to be scheduled
/linux-6.12.1/drivers/gpu/drm/ci/
Dgitlab-ci.yml162 # Rule to filter for only scheduled pipelines.
165 - if: &is-scheduled-pipeline '$CI_PIPELINE_SOURCE == "schedule"'
168 # Generic rule to not run the job during scheduled pipelines. Jobs that aren't
172 - if: *is-scheduled-pipeline

12345678910>>...38