Lines matching full:work in tools/perf/builtin-kwork.c (perf kwork)

work_search():
    314     struct kwork_work *work;
    318     work = container_of(node, struct kwork_work, node);
    319     cmp = work_cmp(sort_list, key, work);
    325     if (work->name == NULL)
    326             work->name = key->name;
    327     return work;
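
The hits above are the heart of a standard rbtree lookup. A minimal sketch of how they plausibly fit together (the descent loop and the sign convention of work_cmp() are assumed from the usual rbtree idiom; neither is visible in the hits):

    static struct kwork_work *work_search(struct rb_root_cached *root,
                                          struct kwork_work *key,
                                          struct list_head *sort_list)
    {
            int cmp;
            struct kwork_work *work;
            struct rb_node *node = root->rb_root.rb_node;

            while (node) {
                    work = container_of(node, struct kwork_work, node);
                    cmp = work_cmp(sort_list, key, work);
                    if (cmp > 0)
                            node = node->rb_left;
                    else if (cmp < 0)
                            node = node->rb_right;
                    else {
                            /* Backfill the name: an earlier event may have
                             * created the node before a name was known. */
                            if (work->name == NULL)
                                    work->name = key->name;
                            return work;
                    }
            }
            return NULL;
    }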
work_new():
    361     struct kwork_work *work = zalloc(sizeof(*work));
    363     if (work == NULL) {
    364             pr_err("Failed to zalloc kwork work\n");
    369     INIT_LIST_HEAD(&work->atom_list[i]);
    371     work->id = key->id;
    372     work->cpu = key->cpu;
    373     work->name = key->name;
    374     work->class = key->class;
    375     return work;
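
work_new() is a plain allocate-and-initialize constructor. A sketch assuming the atom_list array is indexed by trace type up to a KWORK_TRACE_MAX bound (the loop header itself is not among the hits):

    static struct kwork_work *work_new(struct kwork_work *key)
    {
            int i;
            struct kwork_work *work = zalloc(sizeof(*work));

            if (work == NULL) {
                    pr_err("Failed to zalloc kwork work\n");
                    return NULL;
            }

            /* One pending-atom list per trace type (raise/entry/exit). */
            for (i = 0; i < KWORK_TRACE_MAX; i++)
                    INIT_LIST_HEAD(&work->atom_list[i]);

            work->id = key->id;
            work->cpu = key->cpu;
            work->name = key->name;
            work->class = key->class;
            return work;
    }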
work_findnew():
    382     struct kwork_work *work = work_search(root, key, sort_list);
    384     if (work != NULL)
    385             return work;
    387     work = work_new(key);
    388     if (work)
    389             work_insert(root, work, sort_list);
    391     return work;
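
Together the three helpers form the usual find-or-create pattern: search first, allocate only on a miss, then insert keyed by kwork->cmp_id so the tree order follows the active sort keys. Callers therefore get one stable node per unique work item regardless of which event type sees it first.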
profile_name_match():
    408                             struct kwork_work *work)
    410     if (kwork->profile_name && work->name &&
    411         (strcmp(work->name, kwork->profile_name) != 0)) {

profile_event_match():
    419                              struct kwork_work *work,
    422     int cpu = work->cpu;
    438         !profile_name_match(kwork, work)) {
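
profile_name_match() implements the name filter; the two hit lines are essentially its whole condition. A sketch assuming a bool return (profile_event_match() additionally appears to gate on the sampled CPU and timestamp before consulting it, per the call at line 438):

    static bool profile_name_match(struct perf_kwork *kwork,
                                   struct kwork_work *work)
    {
            /* No filter configured, or the work has no name yet: pass. */
            if (kwork->profile_name && work->name &&
                (strcmp(work->name, kwork->profile_name) != 0)) {
                    return false;
            }
            return true;
    }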
work_push_atom():
    457     struct kwork_work *work, key;
    466     work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
    467     if (work == NULL) {
    472     if (!profile_event_match(kwork, work, sample)) {
    478     dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
    487     *ret_work = work;
    490     last_atom = list_last_entry_or_null(&work->atom_list[src_type],
    500     list_add_tail(&atom->list, &work->atom_list[src_type]);
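
work_push_atom() is the common path for raise/entry style events: find (or create) the work, try to pair the new atom with the newest atom of the destination type, and queue it on the source list. A condensed sketch; atom_new(), atom_free(), atom_del() and the exact signature are assumed from context, and error paths are trimmed:

    static int work_push_atom(struct perf_kwork *kwork,
                              struct kwork_class *class,
                              enum kwork_trace_type src_type,
                              enum kwork_trace_type dst_type,
                              struct evsel *evsel,
                              struct perf_sample *sample,
                              struct machine *machine,
                              struct kwork_work **ret_work,
                              bool overwrite)
    {
            struct kwork_atom *atom, *dst_atom, *last_atom;
            struct kwork_work *work, key;

            /* Build the lookup key from the tracepoint sample. */
            class->work_init(kwork, class, &key, src_type, evsel, sample, machine);

            atom = atom_new(kwork, sample);
            if (atom == NULL)
                    return -1;

            work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
            if (work == NULL) {
                    atom_free(atom);
                    return -1;
            }

            if (!profile_event_match(kwork, work, sample)) {
                    atom_free(atom);
                    return 0;
            }

            /* Pair with the newest unmatched atom of the destination type. */
            if (dst_type < KWORK_TRACE_MAX) {
                    dst_atom = list_last_entry_or_null(&work->atom_list[dst_type],
                                                       struct kwork_atom, list);
                    if (dst_atom != NULL) {
                            atom->prev = dst_atom;
                            list_del(&dst_atom->list);
                    }
            }

            if (ret_work != NULL)
                    *ret_work = work;

            /* In overwrite mode, keep only the newest source atom. */
            if (overwrite) {
                    last_atom = list_last_entry_or_null(&work->atom_list[src_type],
                                                        struct kwork_atom, list);
                    if (last_atom)
                            atom_del(last_atom);
            }

            list_add_tail(&atom->list, &work->atom_list[src_type]);
            return 0;
    }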
work_pop_atom():
    515     struct kwork_work *work, key;
    520     work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
    522     *ret_work = work;
    524     if (work == NULL)
    527     if (!profile_event_match(kwork, work, sample))
    530     atom = list_last_entry_or_null(&work->atom_list[dst_type],
    537     list_add_tail(&src_atom->list, &work->atom_list[src_type]);
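
work_pop_atom() is the counterpart for entry/exit style events: it returns an unpaired atom of the destination type if one exists, and otherwise records a new source atom so a later event can pair with it. A sketch under the same assumptions as above:

    static struct kwork_atom *work_pop_atom(struct perf_kwork *kwork,
                                            struct kwork_class *class,
                                            enum kwork_trace_type src_type,
                                            enum kwork_trace_type dst_type,
                                            struct evsel *evsel,
                                            struct perf_sample *sample,
                                            struct machine *machine,
                                            struct kwork_work **ret_work)
    {
            struct kwork_atom *atom, *src_atom;
            struct kwork_work *work, key;

            class->work_init(kwork, class, &key, src_type, evsel, sample, machine);

            work = work_findnew(&class->work_root, &key, &kwork->cmp_id);
            if (ret_work != NULL)
                    *ret_work = work;

            if (work == NULL)
                    return NULL;

            if (!profile_event_match(kwork, work, sample))
                    return NULL;

            /* A pending destination atom means we can close the interval. */
            atom = list_last_entry_or_null(&work->atom_list[dst_type],
                                           struct kwork_atom, list);
            if (atom != NULL)
                    return atom;

            /* Nothing to pair with yet: remember this event instead. */
            src_atom = atom_new(kwork, sample);
            if (src_atom != NULL)
                    list_add_tail(&src_atom->list, &work->atom_list[src_type]);

            return NULL;
    }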
find_work_by_id():
    550     struct kwork_work *work;
    554     work = rb_entry(next, struct kwork_work, node);
    555     if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
    556         (cpu == -1 && work->id == id))
    557             return work;
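
Unlike work_search(), find_work_by_id() ignores the sort keys and walks the whole tree linearly; passing cpu == -1 matches on id alone. A sketch assuming rb_first_cached()/rb_next() iteration:

    static struct kwork_work *find_work_by_id(struct rb_root_cached *root,
                                              u64 id, int cpu)
    {
            struct rb_node *next = rb_first_cached(root);
            struct kwork_work *work;

            while (next) {
                    work = rb_entry(next, struct kwork_work, node);
                    if ((cpu != -1 && work->id == id && work->cpu == cpu) ||
                        (cpu == -1 && work->id == id))
                            return work;
                    next = rb_next(next);
            }
            return NULL;
    }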
report_update_exit_event():
    578     static void report_update_exit_event(struct kwork_work *work,
    588     if ((delta > work->max_runtime) ||
    589         (work->max_runtime == 0)) {
    590             work->max_runtime = delta;
    591             work->max_runtime_start = entry_time;
    592             work->max_runtime_end = exit_time;
    594     work->total_runtime += delta;
    595     work->nr_atoms++;

report_exit_event():
    617     struct kwork_work *work = NULL;
    621                           machine, &work);
    622     if (work == NULL)
    626     report_update_exit_event(work, atom, sample);
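
The accounting step keeps both a running total and the single longest interval with its start/end timestamps. A sketch; the delta computation and the entry/exit validity check are assumed (only the update body appears in the hits):

    static void report_update_exit_event(struct kwork_work *work,
                                         struct kwork_atom *atom,
                                         struct perf_sample *sample)
    {
            u64 delta;
            u64 exit_time = sample->time;
            u64 entry_time = atom->time;

            if ((entry_time != 0) && (exit_time >= entry_time)) {
                    delta = exit_time - entry_time;
                    /* Track the longest single run and when it happened. */
                    if ((delta > work->max_runtime) ||
                        (work->max_runtime == 0)) {
                            work->max_runtime = delta;
                            work->max_runtime_start = entry_time;
                            work->max_runtime_end = exit_time;
                    }
                    work->total_runtime += delta;
                    work->nr_atoms++;
            }
    }

latency_update_entry_event() below is the same pattern with raise-to-entry deltas feeding max_latency/total_latency instead of the runtime fields.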
latency_update_entry_event():
    633     static void latency_update_entry_event(struct kwork_work *work,
    643     if ((delta > work->max_latency) ||
    644         (work->max_latency == 0)) {
    645             work->max_latency = delta;
    646             work->max_latency_start = raise_time;
    647             work->max_latency_end = entry_time;
    649     work->total_latency += delta;
    650     work->nr_atoms++;

latency_entry_event():
    672     struct kwork_work *work = NULL;
    676                           machine, &work);
    677     if (work == NULL)
    681     latency_update_entry_event(work, atom, sample);
timehist_print_event():
    738                              struct kwork_work *work,
    763     printf(" [%0*d] ", PRINT_CPU_WIDTH, work->cpu);
    768     if (work->class && work->class->work_name) {
    769             work->class->work_name(work, kwork_name,

timehist_entry_event():
    831     struct kwork_work *work = NULL;
    835                           machine, &work, true);
    839     if (work != NULL)

timehist_exit_event():
    852     struct kwork_work *work = NULL;
    865                           machine, &work);
    866     if (work == NULL) {
    872     work->nr_atoms++;
    873     timehist_print_event(kwork, work, atom, sample, &al);
top_update_runtime():
    882     static void top_update_runtime(struct kwork_work *work,
    892     work->total_runtime += delta;

top_exit_event():
    913     struct kwork_work *work, *sched_work;
    919                           machine, &work);
    920     if (!work)
    927                           work->id, work->cpu);
    929     top_update_runtime(work, atom, sample);

top_sched_switch_event():
    944     struct kwork_work *work;
    948                           machine, &work);
    949     if (!work)
    953     top_update_runtime(work, atom, sample);
irq_work_init():
    1006                         struct kwork_work *work,
    1012    work->class = class;
    1013    work->cpu = sample->cpu;
    1016    work->id = evsel__intval_common(evsel, sample, "common_pid");
    1017    work->name = NULL;
    1019    work->id = evsel__intval(evsel, sample, "irq");
    1020    work->name = evsel__strval(evsel, sample, "name");

irq_work_name():
    1024    static void irq_work_name(struct kwork_work *work, char *buf, int len)
    1026    snprintf(buf, len, "%s:%" PRIu64 "", work->name, work->id);
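
Each kwork class supplies a work_init() to build the lookup key from a tracepoint sample and a work_name() to render it. For IRQs, a sketch; the branch condition is inferred from the two id/name pairs in the hits (top mode keys by pid, otherwise by IRQ number):

    static int irq_work_init(struct perf_kwork *kwork,
                             struct kwork_class *class,
                             struct kwork_work *work,
                             enum kwork_trace_type src_type __maybe_unused,
                             struct evsel *evsel,
                             struct perf_sample *sample,
                             struct machine *machine __maybe_unused)
    {
            work->class = class;
            work->cpu = sample->cpu;

            if (kwork->report == KWORK_REPORT_TOP) {
                    /* Top mode keys by the sampled task's pid. */
                    work->id = evsel__intval_common(evsel, sample, "common_pid");
                    work->name = NULL;
            } else {
                    work->id = evsel__intval(evsel, sample, "irq");
                    work->name = evsel__strval(evsel, sample, "name");
            }
            return 0;
    }

irq_work_name() then formats this as "name:id", e.g. "eth0:30". The softirq variant at lines 1133-1156 follows the same shape but maps the vector number through evsel__softirq_name() and prefixes "(s)".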
softirq_work_init():
    1133                             struct kwork_work *work,
    1141    work->class = class;
    1142    work->cpu = sample->cpu;
    1145    work->id = evsel__intval_common(evsel, sample, "common_pid");
    1146    work->name = NULL;
    1149    work->id = num;
    1150    work->name = evsel__softirq_name(evsel, num);

softirq_work_name():
    1154    static void softirq_work_name(struct kwork_work *work, char *buf, int len)
    1156    snprintf(buf, len, "(s)%s:%" PRIu64 "", work->name, work->id);
workqueue_work_init():
    1233                               struct kwork_work *work,
    1243    work->class = class;
    1244    work->cpu = sample->cpu;
    1245    work->id = evsel__intval(evsel, sample, "work");
    1246    work->name = function_addr == 0 ? NULL :

workqueue_work_name():
    1250    static void workqueue_work_name(struct kwork_work *work, char *buf, int len)
    1252    if (work->name != NULL)
    1253            snprintf(buf, len, "(w)%s", work->name);
    1255            snprintf(buf, len, "(w)0x%" PRIx64, work->id);
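
The workqueue class keys each item by the work struct pointer (the tracepoint's "work" field) and tries to name it after the callback symbol. The truncated hit at line 1246 suggests the name comes from resolving the sampled function address; a sketch assuming perf's machine__resolve_kernel_addr() helper:

    static int workqueue_work_init(struct perf_kwork *kwork __maybe_unused,
                                   struct kwork_class *class,
                                   struct kwork_work *work,
                                   enum kwork_trace_type src_type __maybe_unused,
                                   struct evsel *evsel,
                                   struct perf_sample *sample,
                                   struct machine *machine)
    {
            char *modp = NULL;
            unsigned long long function_addr = evsel__intval(evsel,
                                                             sample, "function");

            work->class = class;
            work->cpu = sample->cpu;
            work->id = evsel__intval(evsel, sample, "work");
            /* Name the item after its callback symbol when resolvable. */
            work->name = function_addr == 0 ? NULL :
                    machine__resolve_kernel_addr(machine, &function_addr, &modp);
            return 0;
    }

workqueue_work_name() falls back to printing the raw pointer ("(w)0x...") when the symbol could not be resolved, which is why both branches exist at lines 1252-1255.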
sched_work_init():
    1301                          struct kwork_work *work,
    1307    work->class = class;
    1308    work->cpu = sample->cpu;
    1311    work->id = evsel__intval(evsel, sample, "prev_pid");
    1312    work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
    1314    work->id = evsel__intval(evsel, sample, "next_pid");
    1315    work->name = strdup(evsel__strval(evsel, sample, "next_comm"));

sched_work_name():
    1319    static void sched_work_name(struct kwork_work *work, char *buf, int len)
    1321    snprintf(buf, len, "%s", work->name);
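
The sched class is the one place that duplicates the comm string: evsel__strval() returns a pointer into the sample, which does not outlive the event, so strdup() gives the work item its own copy. A sketch; the KWORK_TRACE_EXIT/ENTRY branch conditions are inferred from the prev_/next_ field pairs in the hits:

    static int sched_work_init(struct perf_kwork *kwork __maybe_unused,
                               struct kwork_class *class,
                               struct kwork_work *work,
                               enum kwork_trace_type src_type,
                               struct evsel *evsel,
                               struct perf_sample *sample,
                               struct machine *machine __maybe_unused)
    {
            work->class = class;
            work->cpu = sample->cpu;

            if (src_type == KWORK_TRACE_EXIT) {
                    /* The task being switched out. */
                    work->id = evsel__intval(evsel, sample, "prev_pid");
                    work->name = strdup(evsel__strval(evsel, sample, "prev_comm"));
            } else if (src_type == KWORK_TRACE_ENTRY) {
                    /* The task being switched in. */
                    work->id = evsel__intval(evsel, sample, "next_pid");
                    work->name = strdup(evsel__strval(evsel, sample, "next_comm"));
            }
            return 0;
    }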
report_print_work():
    1346    static int report_print_work(struct perf_kwork *kwork, struct kwork_work *work)
    1358    if (work->class && work->class->work_name) {
    1359            work->class->work_name(work, kwork_name,
    1369    ret += printf(" %0*d |", PRINT_CPU_WIDTH, work->cpu);
    1377                  (double)work->total_runtime / NSEC_PER_MSEC);
    1381                  (double)work->total_latency /
    1382                  work->nr_atoms / NSEC_PER_MSEC);
    1388    ret += printf(" %*" PRIu64 " |", PRINT_COUNT_WIDTH, work->nr_atoms);
    1394    timestamp__scnprintf_usec(work->max_runtime_start,
    1397    timestamp__scnprintf_usec(work->max_runtime_end,
    1402                  (double)work->max_runtime / NSEC_PER_MSEC,
    1410    timestamp__scnprintf_usec(work->max_latency_start,
    1413    timestamp__scnprintf_usec(work->max_latency_end,
    1418                  (double)work->max_latency / NSEC_PER_MSEC,
top_print_work():
    1637    static int top_print_work(struct perf_kwork *kwork __maybe_unused, struct kwork_work *work)
    1646    ret += printf(" %*" PRIu64 " ", PRINT_PID_WIDTH, work->id);
    1652    ret += printf(" %*d ", PRINT_PID_WIDTH, work->tgid);
    1659                  (double)work->cpu_usage / 100);
    1666                  (double)work->total_runtime / NSEC_PER_MSEC);
    1673                  work->is_kthread ? "[" : "",
    1674                  work->name,
    1675                  work->is_kthread ? "]" : "");
    1677    ret += printf(" %-*s", PRINT_TASK_NAME_WIDTH, work->name);
process_skipped_events():
    1837                              struct kwork_work *work)
    1843    count = nr_list_entry(&work->atom_list[i]);

perf_kwork_add_work():
    1853    struct kwork_work *work = NULL;
    1855    work = work_new(key);
    1856    if (work == NULL)
    1859    work_insert(&class->work_root, work, &kwork->cmp_id);
    1860    return work;
perf_kwork__report():
    1905    struct kwork_work *work;
    1922    work = rb_entry(next, struct kwork_work, node);
    1923    process_skipped_events(kwork, work);
    1925    if (work->nr_atoms != 0) {
    1926            report_print_work(kwork, work);
    1928            kwork->all_runtime += work->total_runtime;
    1929            kwork->all_count += work->nr_atoms;
top_calc_total_runtime():
    1998    struct kwork_work *work;
    2008    work = rb_entry(next, struct kwork_work, node);
    2009    BUG_ON(work->cpu >= MAX_NR_CPUS);
    2010    stat->cpus_runtime[work->cpu].total += work->total_runtime;
    2011    stat->cpus_runtime[MAX_NR_CPUS].total += work->total_runtime;

top_calc_idle_time():
    2017                          struct kwork_work *work)
    2021    if (work->id == 0) {
    2022            stat->cpus_runtime[work->cpu].idle += work->total_runtime;
    2023            stat->cpus_runtime[MAX_NR_CPUS].idle += work->total_runtime;

top_calc_irq_runtime():
    2029                           struct kwork_work *work)
    2034    stat->cpus_runtime[work->cpu].irq += work->total_runtime;
    2035    stat->cpus_runtime[MAX_NR_CPUS].irq += work->total_runtime;
    2037    stat->cpus_runtime[work->cpu].softirq += work->total_runtime;
    2038    stat->cpus_runtime[MAX_NR_CPUS].softirq += work->total_runtime;
top_subtract_irq_runtime():
    2043                              struct kwork_work *work)
    2056                          work->id, work->cpu);
    2060    if (work->total_runtime > data->total_runtime) {
    2061            work->total_runtime -= data->total_runtime;

top_calc_cpu_usage():
    2070    struct kwork_work *work;
    2080    work = rb_entry(next, struct kwork_work, node);
    2082    if (work->total_runtime == 0)
    2085    __set_bit(work->cpu, stat->all_cpus_bitmap);
    2087    top_subtract_irq_runtime(kwork, work);
    2089    work->cpu_usage = work->total_runtime * 10000 /
    2090                      stat->cpus_runtime[work->cpu].total;
    2092    top_calc_idle_time(kwork, work);
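
The hit at lines 2089-2090 stores CPU usage in hundredths of a percent: multiplying by 10000 instead of 100 keeps the value an integer (sortable, no floating point) while preserving two decimal places, and top_print_work() (line 1659) divides by 100 at print time. A standalone illustration with made-up numbers:

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            /* 2.5s of runtime inside a 10s per-CPU window, in nanoseconds. */
            uint64_t total_runtime = 2500000000ULL;
            uint64_t cpu_total     = 10000000000ULL;

            /* Hundredths of a percent, as in top_calc_cpu_usage(). */
            uint64_t cpu_usage = total_runtime * 10000 / cpu_total;

            /* Prints "25.00", matching top_print_work()'s "/ 100" display. */
            printf("%" PRIu64 ".%02" PRIu64 "\n",
                   cpu_usage / 100, cpu_usage % 100);
            return 0;
    }

Note the ordering inside top_calc_cpu_usage(): IRQ and softirq time is subtracted from each task first (line 2087), so the percentage reflects task runtime with interrupt time peeled off.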
top_calc_load_runtime():
    2099                           struct kwork_work *work)
    2103    if (work->id != 0) {
    2104            stat->cpus_runtime[work->cpu].load += work->total_runtime;
    2105            stat->cpus_runtime[MAX_NR_CPUS].load += work->total_runtime;
perf_kwork__top_report():
    2150    struct kwork_work *work;
    2159    work = rb_entry(next, struct kwork_work, node);
    2160    process_skipped_events(kwork, work);
    2162    if (work->total_runtime == 0)
    2165    top_print_work(kwork, work);
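
perf_kwork__top_report() iterates the sorted tree and prints every item that accumulated runtime. A sketch of the loop shape (the sorted_work_root name and the skip path are assumed from the report variant above):

    next = rb_first_cached(&kwork->sorted_work_root);
    while (next) {
            work = rb_entry(next, struct kwork_work, node);
            process_skipped_events(kwork, work);

            /* Items with no accumulated runtime are skipped, per line 2162. */
            if (work->total_runtime != 0)
                    top_print_work(kwork, work);

            next = rb_next(next);
    }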