Searched refs:rq_lock (Results 1 – 16 of 16) sorted by relevance
/linux-6.12.1/drivers/gpu/drm/scheduler/

sched_entity.c
  108  spin_lock_init(&entity->rq_lock);   in drm_sched_entity_init()
  136  spin_lock(&entity->rq_lock);        in drm_sched_entity_modify_sched()
  139  spin_unlock(&entity->rq_lock);      in drm_sched_entity_modify_sched()
  247  spin_lock(&entity->rq_lock);        in drm_sched_entity_kill()
  250  spin_unlock(&entity->rq_lock);      in drm_sched_entity_kill()
  399  spin_lock(&entity->rq_lock);        in drm_sched_entity_set_priority()
  401  spin_unlock(&entity->rq_lock);      in drm_sched_entity_set_priority()
  558  spin_lock(&entity->rq_lock);        in drm_sched_entity_select_rq()
  565  spin_unlock(&entity->rq_lock);      in drm_sched_entity_select_rq()
  606  spin_lock(&entity->rq_lock);        in drm_sched_entity_push_job()
  [all …]

sched_main.c
  179  spin_lock(&entity->rq_lock);        in drm_sched_rq_update_fifo()
  190  spin_unlock(&entity->rq_lock);      in drm_sched_rq_update_fifo()
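Taken together, these hits show the DRM scheduler's per-entity locking pattern: entity->rq_lock is a spinlock initialized once in drm_sched_entity_init() and then taken around every change to the entity's run-queue selection and priority. The sketch below illustrates only that shape; the struct fields other than rq_lock and the demo_* names are hypothetical, not the real drm_sched_entity layout.

    #include <linux/spinlock.h>

    /* Hypothetical stand-in for drm_sched_entity; only rq_lock mirrors the real struct. */
    struct demo_entity {
            spinlock_t rq_lock;     /* protects the fields below */
            void *rq;               /* illustrative: currently selected run queue */
            int priority;           /* illustrative */
    };

    static void demo_entity_init(struct demo_entity *e)
    {
            spin_lock_init(&e->rq_lock);    /* as in drm_sched_entity_init() */
    }

    static void demo_entity_set_priority(struct demo_entity *e, int prio)
    {
            spin_lock(&e->rq_lock);         /* as in drm_sched_entity_set_priority() */
            e->priority = prio;
            spin_unlock(&e->rq_lock);
    }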
/linux-6.12.1/drivers/net/ethernet/intel/ice/

ice_controlq.c
  540   mutex_lock(&cq->rq_lock);      in ice_shutdown_rq()
  562   mutex_unlock(&cq->rq_lock);    in ice_shutdown_rq()
  789   mutex_init(&cq->rq_lock);      in ice_init_ctrlq_locks()
  827   mutex_destroy(&cq->rq_lock);   in ice_destroy_ctrlq_locks()
  1189  mutex_lock(&cq->rq_lock);      in ice_clean_rq_elem()
  1257  mutex_unlock(&cq->rq_lock);    in ice_clean_rq_elem()

ice_controlq.h
  101   struct mutex rq_lock;  /* Receive queue lock */   (member)
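Unlike the DRM scheduler, the ice driver protects its control-queue receive queue with a mutex, so the shutdown and clean-up paths may sleep while holding it. A minimal sketch of that pattern follows; everything except the rq_lock mutex itself (struct name, helpers, ring details) is hypothetical.

    #include <linux/mutex.h>

    struct demo_ctrlq {
            struct mutex rq_lock;   /* Receive queue lock, as in struct ice_ctl_q_info */
            /* ring buffers, head/tail indices, etc. omitted */
    };

    static void demo_ctrlq_init_locks(struct demo_ctrlq *cq)
    {
            mutex_init(&cq->rq_lock);       /* as in ice_init_ctrlq_locks() */
    }

    static void demo_ctrlq_clean_rq(struct demo_ctrlq *cq)
    {
            mutex_lock(&cq->rq_lock);       /* as in ice_clean_rq_elem() */
            /* process received descriptors; sleeping is allowed under a mutex */
            mutex_unlock(&cq->rq_lock);
    }

    static void demo_ctrlq_destroy_locks(struct demo_ctrlq *cq)
    {
            mutex_destroy(&cq->rq_lock);    /* as in ice_destroy_ctrlq_locks() */
    }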
/linux-6.12.1/Documentation/scheduler/

membarrier.rst
  16  rq_lock(); smp_mb__after_spinlock() in __schedule(). The barrier matches a full
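This documentation hit concerns the full memory barrier that membarrier relies on in __schedule(): taking the runqueue lock by itself has only acquire semantics, and smp_mb__after_spinlock() promotes the acquisition to a full barrier. The snippet below is a simplified illustration of that idiom with a stand-in lock, not the actual __schedule() code.

    #include <linux/spinlock.h>

    static DEFINE_RAW_SPINLOCK(demo_lock);

    static void demo_schedule_like_path(void)
    {
            raw_spin_lock(&demo_lock);
            /*
             * Lock acquisition alone only orders later accesses after the
             * lock (acquire semantics); smp_mb__after_spinlock() upgrades
             * it to a full barrier, which is what membarrier.rst refers to
             * for rq_lock() in __schedule().
             */
            smp_mb__after_spinlock();

            /* context-switch-like work */

            raw_spin_unlock(&demo_lock);
    }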
/linux-6.12.1/include/drm/

gpu_scheduler.h
  148  spinlock_t rq_lock;   (member)
/linux-6.12.1/drivers/infiniband/hw/bnxt_re/

ib_verbs.h
  89    spinlock_t rq_lock;  /* protect rq */   (member)

ib_verbs.c
  1608  spin_lock_init(&qp->rq_lock);                  in bnxt_re_create_qp()
  2927  spin_lock_irqsave(&qp->rq_lock, flags);        in bnxt_re_post_recv()
  2970  spin_unlock_irqrestore(&qp->rq_lock, flags);   in bnxt_re_post_recv()
/linux-6.12.1/drivers/infiniband/sw/siw/

siw_verbs.c
  361   spin_lock_init(&qp->rq_lock);                  in siw_create_qp()
  1060  spin_lock_irqsave(&qp->rq_lock, flags);        in siw_post_receive()
  1088  spin_unlock_irqrestore(&qp->rq_lock, flags);   in siw_post_receive()

siw.h
  448   spinlock_t rq_lock;   (member)
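Both RDMA providers above (bnxt_re and siw) use the same shape: a per-QP rq_lock spinlock initialized when the QP is created and taken with spin_lock_irqsave() in the post-receive path, since that path can race with users of the queue running in interrupt context. The sketch below shows only that shape; the struct and function names are hypothetical.

    #include <linux/spinlock.h>

    struct demo_qp {
            spinlock_t rq_lock;     /* protect rq, as in the bnxt_re/siw QP structs */
            /* receive-queue ring omitted */
    };

    static void demo_create_qp(struct demo_qp *qp)
    {
            spin_lock_init(&qp->rq_lock);   /* as in bnxt_re_create_qp()/siw_create_qp() */
    }

    static int demo_post_recv(struct demo_qp *qp)
    {
            unsigned long flags;
            int rc = 0;

            spin_lock_irqsave(&qp->rq_lock, flags);  /* as in bnxt_re_post_recv()/siw_post_receive() */
            /* queue the receive work request(s) onto the RQ ring */
            spin_unlock_irqrestore(&qp->rq_lock, flags);

            return rc;
    }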
/linux-6.12.1/kernel/sched/

sched.h
  1802  static inline void rq_lock(struct rq *rq, struct rq_flags *rf)   in rq_lock()   (function)
  1830  DEFINE_LOCK_GUARD_1(rq_lock, struct rq,
  1831          rq_lock(_T->lock, &_T->rf),
  1852  rq_lock(rq, rf);   in this_rq_lock_irq()
core.c
  833   rq_lock(rq, &rf);   in hrtick()
  859   rq_lock(rq, &rf);   in __hrtick_start()
  2439  rq_lock(rq, rf);    in move_queued_task()
  2513  rq_lock(rq, &rf);   in migration_cpu_stop()
  3951  rq_lock(rq, &rf);   in ttwu_queue()
  5598  rq_lock(rq, &rf);   in sched_tick()
  6592  rq_lock(rq, &rf);   in __schedule()
  7858  rq_lock(rq, &rf);   in __balance_push_cpu_stop()

deadline.c
  1203  scoped_guard (rq_lock, rq) {   in dl_server_timer()
  1772  rq_lock(rq, &rf);   in inactive_task_timer()
  2276  rq_lock(rq, &rf);   in migrate_task_rq_dl()

rt.c
  829   rq_lock(rq, &rf);   in do_sched_rt_period_timer()

ext.c
  4350  rq_lock(rq, &rf);   in scx_ops_bypass()
  4828  rq_lock(rq, &rf);   in scx_dump_state()

fair.c
  6121  rq_lock(rq, &rf);           in __cfsb_csd_unthrottle()
  9653  rq_lock(rq, &rf);           in attach_one_task()
  9669  rq_lock(env->dst_rq, &rf);  in attach_tasks()
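In kernel/sched/, rq_lock is not a struct field but a helper defined in sched.h: it takes the runqueue lock and pins it through a struct rq_flags cookie (rq_unlock() is its counterpart in the same header), and the DEFINE_LOCK_GUARD_1(rq_lock, ...) definition enables the guard form seen in deadline.c. The sketch below shows the two caller-side idioms visible in the hits above; the demo_* function bodies are illustrative only.

    /* Explicit form, as in hrtick(), sched_tick(), __schedule(), ... */
    static void demo_touch_rq(struct rq *rq)
    {
            struct rq_flags rf;

            rq_lock(rq, &rf);       /* acquire the rq lock and pin it via rf */
            /* inspect or update the runqueue */
            rq_unlock(rq, &rf);     /* unpin and release */
    }

    /* Guard form, as in dl_server_timer(); the lock is released automatically
     * on every exit from the scoped block. */
    static void demo_guarded_touch_rq(struct rq *rq)
    {
            scoped_guard (rq_lock, rq) {
                    /* runqueue lock held and pinned for the whole block */
            }
    }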