Lines matching refs: task

update_task_info():
105  static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
108  .pid = task->pid,
114  .tgid = task->tgid,
115  .is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
117  BPF_CORE_READ_STR_INTO(&data.comm, task, comm);
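These lines show update_task_info() snapshotting per-task metadata: pid, tgid, a kthread flag derived from PF_KTHREAD, and the comm string read with BPF_CORE_READ_STR_INTO(). A minimal sketch of that pattern follows, assuming a BTF-enabled program type where direct task_struct dereferences are allowed; the struct task_data layout and the kwork_top_tasks map name are illustrative assumptions, not the definitions used in the file.

#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_core_read.h>

#ifndef PF_KTHREAD
#define PF_KTHREAD 0x00200000   /* kernel thread flag, value from include/linux/sched.h */
#endif

/* Hypothetical value type and per-CPU keyed map; names and fields are
 * illustrative only. */
struct task_data {
	__u32 pid;
	__u32 tgid;
	__u32 is_kthread;
	char comm[16];
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1024);
	__type(key, __u32);                 /* cpu */
	__type(value, struct task_data);
} kwork_top_tasks SEC(".maps");

static __always_inline void update_task_info(struct task_struct *task, __u32 cpu)
{
	struct task_data data = {
		.pid = task->pid,
		.tgid = task->tgid,
		.is_kthread = task->flags & PF_KTHREAD ? 1 : 0,
	};

	/* CO-RE string read of the task name into the snapshot. */
	BPF_CORE_READ_STR_INTO(&data.comm, task, comm);

	/* Publish the latest snapshot for this CPU. */
	bpf_map_update_elem(&kwork_top_tasks, &cpu, &data, BPF_ANY);
}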
on_sched_out():
139  static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
144  pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
152  .pid = task->pid,
153  .task_p = (__u64)task,
157  update_task_info(task, cpu);
on_sched_in():
160  static void on_sched_in(struct task_struct *task, __u64 ts)
164  pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
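The on_sched_in()/on_sched_out() pair uses a BPF task-local storage map (kwork_top_task_time) on the scheduler switch path: switch-in stamps a timestamp into the task's storage slot, switch-out reads it back to account the elapsed runtime and then refreshes the task metadata. The listing also shows on_sched_out() building a {pid, task pointer} key (lines 152-153), presumably for a report map, which is elided here. A sketch of the pairing, assuming a one-field value struct (the real layout may differ):

/* Assumed value type; the actual struct in the file may differ. */
struct task_time {
	__u64 timestamp;
};

struct {
	__uint(type, BPF_MAP_TYPE_TASK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, struct task_time);
} kwork_top_task_time SEC(".maps");

static void on_sched_in(struct task_struct *task, __u64 ts)
{
	struct task_time *pelem;

	/* Create the per-task slot on first use and stamp the switch-in time. */
	pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL,
				     BPF_LOCAL_STORAGE_GET_F_CREATE);
	if (pelem)
		pelem->timestamp = ts;
}

static void on_sched_out(struct task_struct *task, __u64 ts, __u32 cpu)
{
	__u64 delta;
	struct task_time *pelem;

	/* Look up (without creating) the timestamp written at switch-in. */
	pelem = bpf_task_storage_get(&kwork_top_task_time, task, NULL, 0);
	delta = pelem ? ts - pelem->timestamp : 0;

	/* ... add 'delta' to this task's accounted runtime (elided) ... */
	update_task_info(task, cpu);
}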
on_irq_handler_entry():
197  struct task_struct *task;
209  task = (struct task_struct *)bpf_get_current_task();
210  if (!task)
215  .pid = BPF_CORE_READ(task, pid),
216  .task_p = (__u64)task,
on_irq_handler_exit():
232  struct task_struct *task;
245  task = (struct task_struct *)bpf_get_current_task();
246  if (!task)
251  .pid = BPF_CORE_READ(task, pid),
252  .task_p = (__u64)task,
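The hardirq hooks do not receive a task pointer from their tracepoints, so both entry and exit fetch the current task with bpf_get_current_task() and build a key from its pid plus the task pointer itself. A plausible shape for that entry/exit timing pattern is sketched below; struct work_key, the irq_entry_time map, and the function bodies are illustrative assumptions, not the file's actual code.

/* Hypothetical key and entry-timestamp map, shared by the entry/exit hooks. */
struct work_key {
	__u32 pid;
	__u64 task_p;
};

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 8192);
	__type(key, struct work_key);
	__type(value, __u64);               /* entry timestamp, ns */
} irq_entry_time SEC(".maps");

static int on_irq_handler_entry(void)
{
	struct task_struct *task;
	__u64 ts = bpf_ktime_get_ns();

	/* The tracepoint fires in the interrupted task's context. */
	task = (struct task_struct *)bpf_get_current_task();
	if (!task)
		return 0;

	struct work_key key = {
		.pid = BPF_CORE_READ(task, pid),
		.task_p = (__u64)task,
	};

	/* Remember when this task started servicing the interrupt. */
	bpf_map_update_elem(&irq_entry_time, &key, &ts, BPF_ANY);
	return 0;
}

static int on_irq_handler_exit(void)
{
	__u64 *tsp, delta;
	struct task_struct *task;

	task = (struct task_struct *)bpf_get_current_task();
	if (!task)
		return 0;

	struct work_key key = {
		.pid = BPF_CORE_READ(task, pid),
		.task_p = (__u64)task,
	};

	/* Pair the exit with the entry timestamp recorded above. */
	tsp = bpf_map_lookup_elem(&irq_entry_time, &key);
	if (!tsp)
		return 0;

	delta = bpf_ktime_get_ns() - *tsp;
	bpf_map_delete_elem(&irq_entry_time, &key);
	/* ... add 'delta' to this task's hardirq time in the report map (elided) ... */
	return 0;
}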
on_softirq_entry():
269  struct task_struct *task;
281  task = (struct task_struct *)bpf_get_current_task();
282  if (!task)
287  .pid = BPF_CORE_READ(task, pid),
288  .task_p = (__u64)task,
on_softirq_exit():
304  struct task_struct *task;
317  task = (struct task_struct *)bpf_get_current_task();
318  if (!task)
323  .pid = BPF_CORE_READ(task, pid),
324  .task_p = (__u64)task,
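on_softirq_entry() and on_softirq_exit() repeat the same current-task key pattern as the hardirq pair above, presumably attached to the softirq_entry/softirq_exit tracepoints instead: entry records a timestamp for the {pid, task pointer} key, and exit looks it up to account the elapsed softirq time to that task.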