Lines Matching full:event

211 static bool is_kernel_event(struct perf_event *event)  in is_kernel_event()  argument
213 return READ_ONCE(event->owner) == TASK_TOMBSTONE; in is_kernel_event()
233 * - removing the last event from a task ctx; this is relatively straight
236 * - adding the first event to a task ctx; this is tricky because we cannot
247 struct perf_event *event; member
255 struct perf_event *event = efs->event; in event_function() local
256 struct perf_event_context *ctx = event->ctx; in event_function()
291 efs->func(event, cpuctx, ctx, efs->data); in event_function()
298 static void event_function_call(struct perf_event *event, event_f func, void *data) in event_function_call() argument
300 struct perf_event_context *ctx = event->ctx; in event_function_call()
304 .event = event, in event_function_call()
309 if (!event->parent) { in event_function_call()
311 * If this is a !child event, we must hold ctx::mutex to in event_function_call()
312 * stabilize the event->ctx relation. See in event_function_call()
319 cpu_function_call(event->cpu, event_function, &efs); in event_function_call()
345 func(event, NULL, ctx, data); in event_function_call()
355 static void event_function_local(struct perf_event *event, event_f func, void *data) in event_function_local() argument
357 struct perf_event_context *ctx = event->ctx; in event_function_local()
394 func(event, cpuctx, ctx, data); in event_function_local()
447 * perf event paranoia level:
459 * max perf event sample rate
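The two comments above refer to the sysctls exposed as /proc/sys/kernel/perf_event_paranoid and /proc/sys/kernel/perf_event_max_sample_rate. As a minimal, purely illustrative user-space sketch (not part of core.c), a tool can read the paranoid level before deciding whether an unprivileged CPU-wide perf_event_open() is likely to be allowed; the helper name is invented for the example:

#include <stdio.h>

/* Read /proc/sys/kernel/perf_event_paranoid; higher values mean stricter
 * limits (>= 1 generally disallows CPU-wide events without privilege).
 * Falls back to a conservative value if the file cannot be read. */
static int perf_paranoid_level(void)
{
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        int level = 2;

        if (f) {
                if (fscanf(f, "%d", &level) != 1)
                        level = 2;
                fclose(f);
        }
        return level;
}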
609 static u64 perf_event_time(struct perf_event *event);
618 static inline u64 perf_event_clock(struct perf_event *event) in perf_event_clock() argument
620 return event->clock(); in perf_event_clock()
624 * State based event timekeeping...
626 * The basic idea is to use event->state to determine which (if any) time
631 * Event groups make things a little more complicated, but not terribly so. The
646 __perf_effective_state(struct perf_event *event) in __perf_effective_state() argument
648 struct perf_event *leader = event->group_leader; in __perf_effective_state()
653 return event->state; in __perf_effective_state()
657 __perf_update_times(struct perf_event *event, u64 now, u64 *enabled, u64 *running) in __perf_update_times() argument
659 enum perf_event_state state = __perf_effective_state(event); in __perf_update_times()
660 u64 delta = now - event->tstamp; in __perf_update_times()
662 *enabled = event->total_time_enabled; in __perf_update_times()
666 *running = event->total_time_running; in __perf_update_times()
671 static void perf_event_update_time(struct perf_event *event) in perf_event_update_time() argument
673 u64 now = perf_event_time(event); in perf_event_update_time()
675 __perf_update_times(event, now, &event->total_time_enabled, in perf_event_update_time()
676 &event->total_time_running); in perf_event_update_time()
677 event->tstamp = now; in perf_event_update_time()
689 perf_event_set_state(struct perf_event *event, enum perf_event_state state) in perf_event_set_state() argument
691 if (event->state == state) in perf_event_set_state()
694 perf_event_update_time(event); in perf_event_set_state()
699 if ((event->state < 0) ^ (state < 0)) in perf_event_set_state()
700 perf_event_update_sibling_time(event); in perf_event_set_state()
702 WRITE_ONCE(event->state, state); in perf_event_set_state()
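The state-based timekeeping described above reduces to simple delta accounting: enabled time accrues whenever the (group-effective) state is at least INACTIVE, running time only while ACTIVE, and each update charges the elapsed interval to the state that was in effect before the change. A standalone sketch of that idea, using simplified stand-in types rather than the kernel's:

enum ev_state { EV_OFF = -1, EV_INACTIVE = 0, EV_ACTIVE = 1 };

struct ev {
        enum ev_state state;
        unsigned long long tstamp;
        unsigned long long total_enabled;
        unsigned long long total_running;
};

/* Mirrors the accounting done by __perf_update_times(): charge the time
 * since the last timestamp according to the current state, then move the
 * timestamp forward. perf_event_set_state() performs this update before
 * writing the new state, so every interval is billed to the old state. */
static void ev_update_times(struct ev *e, unsigned long long now)
{
        unsigned long long delta = now - e->tstamp;

        if (e->state >= EV_INACTIVE)
                e->total_enabled += delta;
        if (e->state == EV_ACTIVE)
                e->total_running += delta;
        e->tstamp = now;
}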
752 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
756 /* @event doesn't care about cgroup */ in perf_cgroup_match()
757 if (!event->cgrp) in perf_cgroup_match()
765 * Cgroup scoping is recursive. An event enabled for a cgroup is in perf_cgroup_match()
767 * cgroup is a descendant of @event's (the test covers identity in perf_cgroup_match()
771 event->cgrp->css.cgroup); in perf_cgroup_match()
774 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
776 css_put(&event->cgrp->css); in perf_detach_cgroup()
777 event->cgrp = NULL; in perf_detach_cgroup()
780 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
782 return event->cgrp != NULL; in is_cgroup_event()
785 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
789 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time()
793 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
797 t = per_cpu_ptr(event->cgrp->info, event->cpu); in perf_cgroup_event_time_now()
835 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
843 if (!is_cgroup_event(event)) in update_cgrp_time_from_event()
846 info = this_cpu_ptr(event->cgrp->info); in update_cgrp_time_from_event()
889 * cpuctx->cgrp is set when the first cgroup event is enabled, in perf_cgroup_switch()
890 * and is cleared when the last cgroup event is disabled. in perf_cgroup_switch()
922 static int perf_cgroup_ensure_storage(struct perf_event *event, in perf_cgroup_ensure_storage() argument
963 static inline int perf_cgroup_connect(int fd, struct perf_event *event, in perf_cgroup_connect() argument
982 ret = perf_cgroup_ensure_storage(event, css); in perf_cgroup_connect()
987 event->cgrp = cgrp; in perf_cgroup_connect()
995 perf_detach_cgroup(event); in perf_cgroup_connect()
1004 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1008 if (!is_cgroup_event(event)) in perf_cgroup_event_enable()
1011 event->pmu_ctx->nr_cgroups++; in perf_cgroup_event_enable()
1026 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1030 if (!is_cgroup_event(event)) in perf_cgroup_event_disable()
1033 event->pmu_ctx->nr_cgroups--; in perf_cgroup_event_disable()
1050 perf_cgroup_match(struct perf_event *event) in perf_cgroup_match() argument
1055 static inline void perf_detach_cgroup(struct perf_event *event) in perf_detach_cgroup() argument
1058 static inline int is_cgroup_event(struct perf_event *event) in is_cgroup_event() argument
1063 static inline void update_cgrp_time_from_event(struct perf_event *event) in update_cgrp_time_from_event() argument
1072 static inline int perf_cgroup_connect(pid_t pid, struct perf_event *event, in perf_cgroup_connect() argument
1084 static inline u64 perf_cgroup_event_time(struct perf_event *event) in perf_cgroup_event_time() argument
1089 static inline u64 perf_cgroup_event_time_now(struct perf_event *event, u64 now) in perf_cgroup_event_time_now() argument
1095 perf_cgroup_event_enable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_enable() argument
1100 perf_cgroup_event_disable(struct perf_event *event, struct perf_event_context *ctx) in perf_cgroup_event_disable() argument
1268 * because the sys_perf_event_open() case will install a new event and break
1279 * quiesce the event, after which we can install it in the new location. This
1280 * means that only external vectors (perf_fops, prctl) can perturb the event
1284 * However; because event->ctx can change while we're waiting to acquire
1304 perf_event_ctx_lock_nested(struct perf_event *event, int nesting) in perf_event_ctx_lock_nested() argument
1310 ctx = READ_ONCE(event->ctx); in perf_event_ctx_lock_nested()
1318 if (event->ctx != ctx) { in perf_event_ctx_lock_nested()
1328 perf_event_ctx_lock(struct perf_event *event) in perf_event_ctx_lock() argument
1330 return perf_event_ctx_lock_nested(event, 0); in perf_event_ctx_lock()
1333 static void perf_event_ctx_unlock(struct perf_event *event, in perf_event_ctx_unlock() argument
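The comments above and the re-check in perf_event_ctx_lock_nested() describe a lock-and-revalidate pattern: snapshot event->ctx, pin it, take its mutex, then verify that the event still points at the same ctx and retry if it moved while we slept. A stripped-down user-space sketch of that shape; the struct names and get/put helpers are placeholders (the kernel pins the ctx under RCU and uses proper refcounting):

#include <pthread.h>

struct ctx {
        pthread_mutex_t mutex;
        int refcount;                   /* illustrative only */
};

struct obj {
        struct ctx *ctx;                /* may be re-pointed concurrently */
};

static void ctx_get(struct ctx *c) { __atomic_fetch_add(&c->refcount, 1, __ATOMIC_RELAXED); }
static void ctx_put(struct ctx *c) { __atomic_fetch_sub(&c->refcount, 1, __ATOMIC_RELAXED); }

static struct ctx *obj_lock_ctx(struct obj *o)
{
        struct ctx *ctx;

again:
        ctx = __atomic_load_n(&o->ctx, __ATOMIC_ACQUIRE);   /* snapshot */
        ctx_get(ctx);                   /* keep it alive while we sleep */
        pthread_mutex_lock(&ctx->mutex);

        if (o->ctx != ctx) {            /* moved while we waited: retry */
                pthread_mutex_unlock(&ctx->mutex);
                ctx_put(ctx);
                goto again;
        }
        return ctx;                     /* caller holds ctx->mutex */
}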
1359 static u32 perf_event_pid_type(struct perf_event *event, struct task_struct *p, in perf_event_pid_type() argument
1366 if (event->parent) in perf_event_pid_type()
1367 event = event->parent; in perf_event_pid_type()
1369 nr = __task_pid_nr_ns(p, type, event->ns); in perf_event_pid_type()
1376 static u32 perf_event_pid(struct perf_event *event, struct task_struct *p) in perf_event_pid() argument
1378 return perf_event_pid_type(event, p, PIDTYPE_TGID); in perf_event_pid()
1381 static u32 perf_event_tid(struct perf_event *event, struct task_struct *p) in perf_event_tid() argument
1383 return perf_event_pid_type(event, p, PIDTYPE_PID); in perf_event_tid()
1387 * If we inherit events we want to return the parent event id
1390 static u64 primary_event_id(struct perf_event *event) in primary_event_id() argument
1392 u64 id = event->id; in primary_event_id()
1394 if (event->parent) in primary_event_id()
1395 id = event->parent->id; in primary_event_id()
1515 static u64 perf_event_time(struct perf_event *event) in perf_event_time() argument
1517 struct perf_event_context *ctx = event->ctx; in perf_event_time()
1522 if (is_cgroup_event(event)) in perf_event_time()
1523 return perf_cgroup_event_time(event); in perf_event_time()
1528 static u64 perf_event_time_now(struct perf_event *event, u64 now) in perf_event_time_now() argument
1530 struct perf_event_context *ctx = event->ctx; in perf_event_time_now()
1535 if (is_cgroup_event(event)) in perf_event_time_now()
1536 return perf_cgroup_event_time_now(event, now); in perf_event_time_now()
1545 static enum event_type_t get_event_type(struct perf_event *event) in get_event_type() argument
1547 struct perf_event_context *ctx = event->ctx; in get_event_type()
1556 if (event->group_leader != event) in get_event_type()
1557 event = event->group_leader; in get_event_type()
1559 event_type = event->attr.pinned ? EVENT_PINNED : EVENT_FLEXIBLE; in get_event_type()
1567 * Helper function to initialize event group nodes.
1569 static void init_event_group(struct perf_event *event) in init_event_group() argument
1571 RB_CLEAR_NODE(&event->group_node); in init_event_group()
1572 event->group_index = 0; in init_event_group()
1577 * based on event attrs bits.
1580 get_event_groups(struct perf_event *event, struct perf_event_context *ctx) in get_event_groups() argument
1582 if (event->attr.pinned) in get_event_groups()
1597 static inline struct cgroup *event_cgroup(const struct perf_event *event) in event_cgroup() argument
1602 if (event->cgrp) in event_cgroup()
1603 cgroup = event->cgrp->css.cgroup; in event_cgroup()
1610 * Compare function for event groups;
1705 * Insert @event into @groups' tree; using
1706 * {@event->cpu, @event->pmu_ctx->pmu, event_cgroup(@event), ++@groups->index}
1711 struct perf_event *event) in perf_event_groups_insert() argument
1713 event->group_index = ++groups->index; in perf_event_groups_insert()
1715 rb_add(&event->group_node, &groups->tree, __group_less); in perf_event_groups_insert()
1719 * Helper function to insert event into the pinned or flexible groups.
1722 add_event_to_groups(struct perf_event *event, struct perf_event_context *ctx) in add_event_to_groups() argument
1726 groups = get_event_groups(event, ctx); in add_event_to_groups()
1727 perf_event_groups_insert(groups, event); in add_event_to_groups()
1735 struct perf_event *event) in perf_event_groups_delete() argument
1737 WARN_ON_ONCE(RB_EMPTY_NODE(&event->group_node) || in perf_event_groups_delete()
1740 rb_erase(&event->group_node, &groups->tree); in perf_event_groups_delete()
1741 init_event_group(event); in perf_event_groups_delete()
1745 * Helper function to delete event from its groups.
1748 del_event_from_groups(struct perf_event *event, struct perf_event_context *ctx) in del_event_from_groups() argument
1752 groups = get_event_groups(event, ctx); in del_event_from_groups()
1753 perf_event_groups_delete(groups, event); in del_event_from_groups()
1757 * Get the leftmost event in the {cpu,pmu,cgroup} subtree.
1778 perf_event_groups_next(struct perf_event *event, struct pmu *pmu) in perf_event_groups_next() argument
1781 .cpu = event->cpu, in perf_event_groups_next()
1783 .cgroup = event_cgroup(event), in perf_event_groups_next()
1787 next = rb_next_match(&key, &event->group_node, __group_cmp); in perf_event_groups_next()
1794 #define perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) \ argument
1795 for (event = perf_event_groups_first(groups, cpu, pmu, NULL); \
1796 event; event = perf_event_groups_next(event, pmu))
1801 #define perf_event_groups_for_each(event, groups) \ argument
1802 for (event = rb_entry_safe(rb_first(&((groups)->tree)), \
1803 typeof(*event), group_node); event; \
1804 event = rb_entry_safe(rb_next(&event->group_node), \
1805 typeof(*event), group_node))
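The insert and iteration helpers above keep each groups RB-tree ordered by the composite key {cpu, pmu, cgroup, group_index}, so all events of one {cpu, pmu, cgroup} bucket form a contiguous subtree and the ever-increasing index preserves insertion order inside it. A rough illustration of that lexicographic ordering; the struct and comparator are invented for the sketch, and the kernel compares cgroup IDs rather than the raw pointer shown here:

struct group_key {
        int cpu;
        const void *pmu;
        const void *cgroup;
        unsigned long long index;       /* ++groups->index at insert time */
};

/* Compare cpu, then pmu, then cgroup, then insertion index. */
static int group_key_cmp(const struct group_key *a, const struct group_key *b)
{
        if (a->cpu != b->cpu)
                return a->cpu < b->cpu ? -1 : 1;
        if (a->pmu != b->pmu)
                return a->pmu < b->pmu ? -1 : 1;
        if (a->cgroup != b->cgroup)
                return a->cgroup < b->cgroup ? -1 : 1;
        if (a->index != b->index)
                return a->index < b->index ? -1 : 1;
        return 0;
}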
1808 * Does the event attribute request inherit with PERF_SAMPLE_READ
1816 * Add an event to the lists for its context.
1820 list_add_event(struct perf_event *event, struct perf_event_context *ctx) in list_add_event() argument
1824 WARN_ON_ONCE(event->attach_state & PERF_ATTACH_CONTEXT); in list_add_event()
1825 event->attach_state |= PERF_ATTACH_CONTEXT; in list_add_event()
1827 event->tstamp = perf_event_time(event); in list_add_event()
1830 * If we're a stand alone event or group leader, we go to the context in list_add_event()
1834 if (event->group_leader == event) { in list_add_event()
1835 event->group_caps = event->event_caps; in list_add_event()
1836 add_event_to_groups(event, ctx); in list_add_event()
1839 list_add_rcu(&event->event_entry, &ctx->event_list); in list_add_event()
1841 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_add_event()
1843 if (event->attr.inherit_stat) in list_add_event()
1845 if (has_inherit_and_sample_read(&event->attr)) in list_add_event()
1848 if (event->state > PERF_EVENT_STATE_OFF) in list_add_event()
1849 perf_cgroup_event_enable(event, ctx); in list_add_event()
1852 event->pmu_ctx->nr_events++; in list_add_event()
1856 * Initialize event state based on the perf_event_attr::disabled.
1858 static inline void perf_event__state_init(struct perf_event *event) in perf_event__state_init() argument
1860 event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : in perf_event__state_init()
1894 static void __perf_event_header_size(struct perf_event *event, u64 sample_type) in __perf_event_header_size() argument
1912 size += event->read_size; in __perf_event_header_size()
1932 event->header_size = size; in __perf_event_header_size()
1939 static void perf_event__header_size(struct perf_event *event) in perf_event__header_size() argument
1941 event->read_size = in perf_event__header_size()
1942 __perf_event_read_size(event->attr.read_format, in perf_event__header_size()
1943 event->group_leader->nr_siblings); in perf_event__header_size()
1944 __perf_event_header_size(event, event->attr.sample_type); in perf_event__header_size()
1947 static void perf_event__id_header_size(struct perf_event *event) in perf_event__id_header_size() argument
1950 u64 sample_type = event->attr.sample_type; in perf_event__id_header_size()
1971 event->id_header_size = size; in perf_event__id_header_size()
1975 * Check that adding an event to the group does not result in anybody
1976 * overflowing the 64k event limit imposed by the output buffer.
1978 * Specifically, check that the read_size for the event does not exceed 16k,
1980 * depends on per-event read_format, also (re)check the existing events.
1985 static bool perf_event_validate_size(struct perf_event *event) in perf_event_validate_size() argument
1987 struct perf_event *sibling, *group_leader = event->group_leader; in perf_event_validate_size()
1989 if (__perf_event_read_size(event->attr.read_format, in perf_event_validate_size()
2004 if (event == group_leader) in perf_event_validate_size()
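perf_event_validate_size() above bounds event->read_size so that a single output record cannot approach the 64k record limit. The size it checks grows with the read_format bits and, for PERF_FORMAT_GROUP, with the number of group members. An illustrative user-space estimate of that growth using the uapi PERF_FORMAT_* flags; it mirrors the accounting but is not the kernel's __perf_event_read_size():

#include <linux/perf_event.h>
#include <stddef.h>

static size_t estimate_read_size(unsigned long long read_format, int nr_siblings)
{
        size_t entry = sizeof(unsigned long long);      /* the counter value */
        size_t size = 0;
        int nr = 1;

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                size += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                size += sizeof(unsigned long long);
        if (read_format & PERF_FORMAT_ID)
                entry += sizeof(unsigned long long);    /* per-member id */
        if (read_format & PERF_FORMAT_GROUP) {
                nr += nr_siblings;                      /* one entry per member */
                size += sizeof(unsigned long long);     /* leading nr field */
        }
        return size + nr * entry;
}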
2016 static void perf_group_attach(struct perf_event *event) in perf_group_attach() argument
2018 struct perf_event *group_leader = event->group_leader, *pos; in perf_group_attach()
2020 lockdep_assert_held(&event->ctx->lock); in perf_group_attach()
2026 if (event->attach_state & PERF_ATTACH_GROUP) in perf_group_attach()
2029 event->attach_state |= PERF_ATTACH_GROUP; in perf_group_attach()
2031 if (group_leader == event) in perf_group_attach()
2034 WARN_ON_ONCE(group_leader->ctx != event->ctx); in perf_group_attach()
2036 group_leader->group_caps &= event->event_caps; in perf_group_attach()
2038 list_add_tail(&event->sibling_list, &group_leader->sibling_list); in perf_group_attach()
2049 * Remove an event from the lists for its context.
2053 list_del_event(struct perf_event *event, struct perf_event_context *ctx) in list_del_event() argument
2055 WARN_ON_ONCE(event->ctx != ctx); in list_del_event()
2061 if (!(event->attach_state & PERF_ATTACH_CONTEXT)) in list_del_event()
2064 event->attach_state &= ~PERF_ATTACH_CONTEXT; in list_del_event()
2067 if (event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT) in list_del_event()
2069 if (event->attr.inherit_stat) in list_del_event()
2071 if (has_inherit_and_sample_read(&event->attr)) in list_del_event()
2074 list_del_rcu(&event->event_entry); in list_del_event()
2076 if (event->group_leader == event) in list_del_event()
2077 del_event_from_groups(event, ctx); in list_del_event()
2080 * If event was in error state, then keep it in list_del_event()
2084 * of the event in list_del_event()
2086 if (event->state > PERF_EVENT_STATE_OFF) { in list_del_event()
2087 perf_cgroup_event_disable(event, ctx); in list_del_event()
2088 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in list_del_event()
2092 event->pmu_ctx->nr_events--; in list_del_event()
2096 perf_aux_output_match(struct perf_event *event, struct perf_event *aux_event) in perf_aux_output_match() argument
2101 if (!event->pmu->aux_output_match) in perf_aux_output_match()
2104 return event->pmu->aux_output_match(aux_event); in perf_aux_output_match()
2107 static void put_event(struct perf_event *event);
2108 static void event_sched_out(struct perf_event *event,
2111 static void perf_put_aux_event(struct perf_event *event) in perf_put_aux_event() argument
2113 struct perf_event_context *ctx = event->ctx; in perf_put_aux_event()
2117 * If event uses aux_event tear down the link in perf_put_aux_event()
2119 if (event->aux_event) { in perf_put_aux_event()
2120 iter = event->aux_event; in perf_put_aux_event()
2121 event->aux_event = NULL; in perf_put_aux_event()
2127 * If the event is an aux_event, tear down all links to in perf_put_aux_event()
2130 for_each_sibling_event(iter, event->group_leader) { in perf_put_aux_event()
2131 if (iter->aux_event != event) in perf_put_aux_event()
2135 put_event(event); in perf_put_aux_event()
2143 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_put_aux_event()
2147 static bool perf_need_aux_event(struct perf_event *event) in perf_need_aux_event() argument
2149 return !!event->attr.aux_output || !!event->attr.aux_sample_size; in perf_need_aux_event()
2152 static int perf_get_aux_event(struct perf_event *event, in perf_get_aux_event() argument
2156 * Our group leader must be an aux event if we want to be in perf_get_aux_event()
2157 * an aux_output. This way, the aux event will precede its in perf_get_aux_event()
2167 if (event->attr.aux_output && event->attr.aux_sample_size) in perf_get_aux_event()
2170 if (event->attr.aux_output && in perf_get_aux_event()
2171 !perf_aux_output_match(event, group_leader)) in perf_get_aux_event()
2174 if (event->attr.aux_sample_size && !group_leader->pmu->snapshot_aux) in perf_get_aux_event()
2181 * Link aux_outputs to their aux event; this is undone in in perf_get_aux_event()
2186 event->aux_event = group_leader; in perf_get_aux_event()
2191 static inline struct list_head *get_event_list(struct perf_event *event) in get_event_list() argument
2193 return event->attr.pinned ? &event->pmu_ctx->pinned_active : in get_event_list()
2194 &event->pmu_ctx->flexible_active; in get_event_list()
2203 static inline void perf_remove_sibling_event(struct perf_event *event) in perf_remove_sibling_event() argument
2205 event_sched_out(event, event->ctx); in perf_remove_sibling_event()
2206 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in perf_remove_sibling_event()
2209 static void perf_group_detach(struct perf_event *event) in perf_group_detach() argument
2211 struct perf_event *leader = event->group_leader; in perf_group_detach()
2213 struct perf_event_context *ctx = event->ctx; in perf_group_detach()
2220 if (!(event->attach_state & PERF_ATTACH_GROUP)) in perf_group_detach()
2223 event->attach_state &= ~PERF_ATTACH_GROUP; in perf_group_detach()
2225 perf_put_aux_event(event); in perf_group_detach()
2230 if (leader != event) { in perf_group_detach()
2231 list_del_init(&event->sibling_list); in perf_group_detach()
2232 event->group_leader->nr_siblings--; in perf_group_detach()
2233 event->group_leader->group_generation++; in perf_group_detach()
2238 * If this was a group event with sibling events then in perf_group_detach()
2242 list_for_each_entry_safe(sibling, tmp, &event->sibling_list, sibling_list) { in perf_group_detach()
2251 sibling->group_caps = event->group_caps; in perf_group_detach()
2254 add_event_to_groups(sibling, event->ctx); in perf_group_detach()
2260 WARN_ON_ONCE(sibling->ctx != event->ctx); in perf_group_detach()
2272 static void perf_child_detach(struct perf_event *event) in perf_child_detach() argument
2274 struct perf_event *parent_event = event->parent; in perf_child_detach()
2276 if (!(event->attach_state & PERF_ATTACH_CHILD)) in perf_child_detach()
2279 event->attach_state &= ~PERF_ATTACH_CHILD; in perf_child_detach()
2286 sync_child_event(event); in perf_child_detach()
2287 list_del_init(&event->child_list); in perf_child_detach()
2290 static bool is_orphaned_event(struct perf_event *event) in is_orphaned_event() argument
2292 return event->state == PERF_EVENT_STATE_DEAD; in is_orphaned_event()
2296 event_filter_match(struct perf_event *event) in event_filter_match() argument
2298 return (event->cpu == -1 || event->cpu == smp_processor_id()) && in event_filter_match()
2299 perf_cgroup_match(event); in event_filter_match()
2303 event_sched_out(struct perf_event *event, struct perf_event_context *ctx) in event_sched_out() argument
2305 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_out()
2311 WARN_ON_ONCE(event->ctx != ctx); in event_sched_out()
2314 if (event->state != PERF_EVENT_STATE_ACTIVE) in event_sched_out()
2322 list_del_init(&event->active_list); in event_sched_out()
2324 perf_pmu_disable(event->pmu); in event_sched_out()
2326 event->pmu->del(event, 0); in event_sched_out()
2327 event->oncpu = -1; in event_sched_out()
2329 if (event->pending_disable) { in event_sched_out()
2330 event->pending_disable = 0; in event_sched_out()
2331 perf_cgroup_event_disable(event, ctx); in event_sched_out()
2335 perf_event_set_state(event, state); in event_sched_out()
2337 if (!is_software_event(event)) in event_sched_out()
2339 if (event->attr.freq && event->attr.sample_freq) { in event_sched_out()
2343 if (event->attr.exclusive || !cpc->active_oncpu) in event_sched_out()
2346 perf_pmu_enable(event->pmu); in event_sched_out()
2352 struct perf_event *event; in group_sched_out() local
2364 for_each_sibling_event(event, group_event) in group_sched_out()
2365 event_sched_out(event, ctx); in group_sched_out()
2397 ctx_time_update_event(struct perf_event_context *ctx, struct perf_event *event) in ctx_time_update_event() argument
2403 update_cgrp_time_from_event(event); in ctx_time_update_event()
2412 * Cross CPU call to remove a performance event
2414 * We disable the event on the hardware level first. After that we
2418 __perf_remove_from_context(struct perf_event *event, in __perf_remove_from_context() argument
2423 struct perf_event_pmu_context *pmu_ctx = event->pmu_ctx; in __perf_remove_from_context()
2433 event->pending_disable = 1; in __perf_remove_from_context()
2434 event_sched_out(event, ctx); in __perf_remove_from_context()
2436 perf_group_detach(event); in __perf_remove_from_context()
2438 perf_child_detach(event); in __perf_remove_from_context()
2439 list_del_event(event, ctx); in __perf_remove_from_context()
2441 event->state = PERF_EVENT_STATE_DEAD; in __perf_remove_from_context()
2468 * Remove the event from a task's (or a CPU's) list of events.
2470 * If event->ctx is a cloned context, callers must make sure that
2471 * every task struct that event->ctx->task could possibly point to
2477 static void perf_remove_from_context(struct perf_event *event, unsigned long flags) in perf_remove_from_context() argument
2479 struct perf_event_context *ctx = event->ctx; in perf_remove_from_context()
2490 __perf_remove_from_context(event, this_cpu_ptr(&perf_cpu_context), in perf_remove_from_context()
2497 event_function_call(event, __perf_remove_from_context, (void *)flags); in perf_remove_from_context()
2501 * Cross CPU call to disable a performance event
2503 static void __perf_event_disable(struct perf_event *event, in __perf_event_disable() argument
2508 if (event->state < PERF_EVENT_STATE_INACTIVE) in __perf_event_disable()
2511 perf_pmu_disable(event->pmu_ctx->pmu); in __perf_event_disable()
2512 ctx_time_update_event(ctx, event); in __perf_event_disable()
2514 if (event == event->group_leader) in __perf_event_disable()
2515 group_sched_out(event, ctx); in __perf_event_disable()
2517 event_sched_out(event, ctx); in __perf_event_disable()
2519 perf_event_set_state(event, PERF_EVENT_STATE_OFF); in __perf_event_disable()
2520 perf_cgroup_event_disable(event, ctx); in __perf_event_disable()
2522 perf_pmu_enable(event->pmu_ctx->pmu); in __perf_event_disable()
2526 * Disable an event.
2528 * If event->ctx is a cloned context, callers must make sure that
2529 * every task struct that event->ctx->task could possibly point to
2532 * hold the top-level event's child_mutex, so any descendant that
2535 * When called from perf_pending_disable it's OK because event->ctx
2539 static void _perf_event_disable(struct perf_event *event) in _perf_event_disable() argument
2541 struct perf_event_context *ctx = event->ctx; in _perf_event_disable()
2544 if (event->state <= PERF_EVENT_STATE_OFF) { in _perf_event_disable()
2550 event_function_call(event, __perf_event_disable, NULL); in _perf_event_disable()
2553 void perf_event_disable_local(struct perf_event *event) in perf_event_disable_local() argument
2555 event_function_local(event, __perf_event_disable, NULL); in perf_event_disable_local()
2562 void perf_event_disable(struct perf_event *event) in perf_event_disable() argument
2566 ctx = perf_event_ctx_lock(event); in perf_event_disable()
2567 _perf_event_disable(event); in perf_event_disable()
2568 perf_event_ctx_unlock(event, ctx); in perf_event_disable()
2572 void perf_event_disable_inatomic(struct perf_event *event) in perf_event_disable_inatomic() argument
2574 event->pending_disable = 1; in perf_event_disable_inatomic()
2575 irq_work_queue(&event->pending_disable_irq); in perf_event_disable_inatomic()
2580 static void perf_log_throttle(struct perf_event *event, int enable);
2581 static void perf_log_itrace_start(struct perf_event *event);
2584 event_sched_in(struct perf_event *event, struct perf_event_context *ctx) in event_sched_in() argument
2586 struct perf_event_pmu_context *epc = event->pmu_ctx; in event_sched_in()
2590 WARN_ON_ONCE(event->ctx != ctx); in event_sched_in()
2594 if (event->state <= PERF_EVENT_STATE_OFF) in event_sched_in()
2597 WRITE_ONCE(event->oncpu, smp_processor_id()); in event_sched_in()
2599 * Order event::oncpu write to happen before the ACTIVE state is in event_sched_in()
2604 perf_event_set_state(event, PERF_EVENT_STATE_ACTIVE); in event_sched_in()
2611 if (unlikely(event->hw.interrupts == MAX_INTERRUPTS)) { in event_sched_in()
2612 perf_log_throttle(event, 1); in event_sched_in()
2613 event->hw.interrupts = 0; in event_sched_in()
2616 perf_pmu_disable(event->pmu); in event_sched_in()
2618 perf_log_itrace_start(event); in event_sched_in()
2620 if (event->pmu->add(event, PERF_EF_START)) { in event_sched_in()
2621 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_sched_in()
2622 event->oncpu = -1; in event_sched_in()
2627 if (!is_software_event(event)) in event_sched_in()
2629 if (event->attr.freq && event->attr.sample_freq) { in event_sched_in()
2633 if (event->attr.exclusive) in event_sched_in()
2637 perf_pmu_enable(event->pmu); in event_sched_in()
2645 struct perf_event *event, *partial_group = NULL; in group_sched_in() local
2659 for_each_sibling_event(event, group_event) { in group_sched_in()
2660 if (event_sched_in(event, ctx)) { in group_sched_in()
2661 partial_group = event; in group_sched_in()
2673 * The events up to the failed event are scheduled out normally. in group_sched_in()
2675 for_each_sibling_event(event, group_event) { in group_sched_in()
2676 if (event == partial_group) in group_sched_in()
2679 event_sched_out(event, ctx); in group_sched_in()
2689 * Work out whether we can put this event group on the CPU now.
2691 static int group_can_go_on(struct perf_event *event, int can_add_hw) in group_can_go_on() argument
2693 struct perf_event_pmu_context *epc = event->pmu_ctx; in group_can_go_on()
2699 if (event->group_caps & PERF_EV_CAP_SOFTWARE) in group_can_go_on()
2711 if (event->attr.exclusive && !list_empty(get_event_list(event))) in group_can_go_on()
2720 static void add_event_to_ctx(struct perf_event *event, in add_event_to_ctx() argument
2723 list_add_event(event, ctx); in add_event_to_ctx()
2724 perf_group_attach(event); in add_event_to_ctx()
2762 * time an event is added, only do it for the groups of equal priority and
2829 * Cross CPU call to install and enable a performance event
2836 struct perf_event *event = info; in __perf_install_in_context() local
2837 struct perf_event_context *ctx = event->ctx; in __perf_install_in_context()
2868 if (event->state > PERF_EVENT_STATE_OFF && is_cgroup_event(event)) { in __perf_install_in_context()
2870 * If the current cgroup doesn't match the event's in __perf_install_in_context()
2875 event->cgrp->css.cgroup); in __perf_install_in_context()
2881 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2882 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, in __perf_install_in_context()
2883 get_event_type(event)); in __perf_install_in_context()
2885 add_event_to_ctx(event, ctx); in __perf_install_in_context()
2894 static bool exclusive_event_installable(struct perf_event *event,
2898 * Attach a performance event to a context.
2904 struct perf_event *event, in perf_install_in_context() argument
2911 WARN_ON_ONCE(!exclusive_event_installable(event, ctx)); in perf_install_in_context()
2913 if (event->cpu != -1) in perf_install_in_context()
2914 WARN_ON_ONCE(event->cpu != cpu); in perf_install_in_context()
2917 * Ensures that if we can observe event->ctx, both the event and ctx in perf_install_in_context()
2920 smp_store_release(&event->ctx, ctx); in perf_install_in_context()
2924 * without IPI. Except when this is the first event for the context, in in perf_install_in_context()
2928 * event will issue the IPI and reprogram the hardware. in perf_install_in_context()
2930 if (__perf_effective_state(event) == PERF_EVENT_STATE_OFF && in perf_install_in_context()
2931 ctx->nr_events && !is_cgroup_event(event)) { in perf_install_in_context()
2937 add_event_to_ctx(event, ctx); in perf_install_in_context()
2943 cpu_function_call(cpu, __perf_install_in_context, event); in perf_install_in_context()
2985 if (!task_function_call(task, __perf_install_in_context, event)) in perf_install_in_context()
3001 * thus we can safely install the event. in perf_install_in_context()
3007 add_event_to_ctx(event, ctx); in perf_install_in_context()
3012 * Cross CPU call to enable a performance event
3014 static void __perf_event_enable(struct perf_event *event, in __perf_event_enable() argument
3019 struct perf_event *leader = event->group_leader; in __perf_event_enable()
3022 if (event->state >= PERF_EVENT_STATE_INACTIVE || in __perf_event_enable()
3023 event->state <= PERF_EVENT_STATE_ERROR) in __perf_event_enable()
3028 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in __perf_event_enable()
3029 perf_cgroup_event_enable(event, ctx); in __perf_event_enable()
3034 if (!event_filter_match(event)) in __perf_event_enable()
3038 * If the event is in a group and isn't the group leader, in __perf_event_enable()
3041 if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_enable()
3048 ctx_resched(cpuctx, task_ctx, event->pmu_ctx->pmu, get_event_type(event)); in __perf_event_enable()
3052 * Enable an event.
3054 * If event->ctx is a cloned context, callers must make sure that
3055 * every task struct that event->ctx->task could possibly point to
3060 static void _perf_event_enable(struct perf_event *event) in _perf_event_enable() argument
3062 struct perf_event_context *ctx = event->ctx; in _perf_event_enable()
3065 if (event->state >= PERF_EVENT_STATE_INACTIVE || in _perf_event_enable()
3066 event->state < PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3073 * If the event is in error state, clear that first. in _perf_event_enable()
3075 * That way, if we see the event in error state below, we know that it in _perf_event_enable()
3079 if (event->state == PERF_EVENT_STATE_ERROR) { in _perf_event_enable()
3083 if (event->event_caps & PERF_EV_CAP_SIBLING && in _perf_event_enable()
3084 event->group_leader == event) in _perf_event_enable()
3087 event->state = PERF_EVENT_STATE_OFF; in _perf_event_enable()
3091 event_function_call(event, __perf_event_enable, NULL); in _perf_event_enable()
3097 void perf_event_enable(struct perf_event *event) in perf_event_enable() argument
3101 ctx = perf_event_ctx_lock(event); in perf_event_enable()
3102 _perf_event_enable(event); in perf_event_enable()
3103 perf_event_ctx_unlock(event, ctx); in perf_event_enable()
3108 struct perf_event *event; member
3115 struct perf_event *event = sd->event; in __perf_event_stop() local
3118 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in __perf_event_stop()
3126 * so we need to check again lest we try to stop another CPU's event. in __perf_event_stop()
3128 if (READ_ONCE(event->oncpu) != smp_processor_id()) in __perf_event_stop()
3131 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_stop()
3139 * Since this is happening on an event-local CPU, no trace is lost in __perf_event_stop()
3143 event->pmu->start(event, 0); in __perf_event_stop()
3148 static int perf_event_stop(struct perf_event *event, int restart) in perf_event_stop() argument
3151 .event = event, in perf_event_stop()
3157 if (READ_ONCE(event->state) != PERF_EVENT_STATE_ACTIVE) in perf_event_stop()
3164 * We only want to restart ACTIVE events, so if the event goes in perf_event_stop()
3165 * inactive here (event->oncpu==-1), there's nothing more to do; in perf_event_stop()
3168 ret = cpu_function_call(READ_ONCE(event->oncpu), in perf_event_stop()
3181 * event::addr_filter_ranges array and bump the event::addr_filters_gen;
3182 * (p2) when an event is scheduled in (pmu::add), it calls
3186 * If (p1) happens while the event is active, we restart it to force (p2).
3197 void perf_event_addr_filters_sync(struct perf_event *event) in perf_event_addr_filters_sync() argument
3199 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_sync()
3201 if (!has_addr_filter(event)) in perf_event_addr_filters_sync()
3205 if (event->addr_filters_gen != event->hw.addr_filters_gen) { in perf_event_addr_filters_sync()
3206 event->pmu->addr_filters_sync(event); in perf_event_addr_filters_sync()
3207 event->hw.addr_filters_gen = event->addr_filters_gen; in perf_event_addr_filters_sync()
3213 static int _perf_event_refresh(struct perf_event *event, int refresh) in _perf_event_refresh() argument
3218 if (event->attr.inherit || !is_sampling_event(event)) in _perf_event_refresh()
3221 atomic_add(refresh, &event->event_limit); in _perf_event_refresh()
3222 _perf_event_enable(event); in _perf_event_refresh()
3230 int perf_event_refresh(struct perf_event *event, int refresh) in perf_event_refresh() argument
3235 ctx = perf_event_ctx_lock(event); in perf_event_refresh()
3236 ret = _perf_event_refresh(event, refresh); in perf_event_refresh()
3237 perf_event_ctx_unlock(event, ctx); in perf_event_refresh()
3259 * Copy event-type-independent attributes that may be modified.
3267 static int perf_event_modify_attr(struct perf_event *event, in perf_event_modify_attr() argument
3274 if (event->attr.type != attr->type) in perf_event_modify_attr()
3277 switch (event->attr.type) { in perf_event_modify_attr()
3286 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_modify_attr()
3288 mutex_lock(&event->child_mutex); in perf_event_modify_attr()
3290 * Event-type-independent attributes must be copied before event-type in perf_event_modify_attr()
3294 perf_event_modify_copy_attr(&event->attr, attr); in perf_event_modify_attr()
3295 err = func(event, attr); in perf_event_modify_attr()
3298 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_modify_attr()
3305 mutex_unlock(&event->child_mutex); in perf_event_modify_attr()
3313 struct perf_event *event, *tmp; in __pmu_ctx_sched_out() local
3329 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3332 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3336 list_for_each_entry_safe(event, tmp, in __pmu_ctx_sched_out()
3339 group_sched_out(event, ctx); in __pmu_ctx_sched_out()
3461 static void __perf_event_sync_stat(struct perf_event *event, in __perf_event_sync_stat() argument
3466 if (!event->attr.inherit_stat) in __perf_event_sync_stat()
3470 * Update the event value, we cannot use perf_event_read() in __perf_event_sync_stat()
3473 * we know the event must be on the current CPU, therefore we in __perf_event_sync_stat()
3476 if (event->state == PERF_EVENT_STATE_ACTIVE) in __perf_event_sync_stat()
3477 event->pmu->read(event); in __perf_event_sync_stat()
3479 perf_event_update_time(event); in __perf_event_sync_stat()
3482 * In order to keep per-task stats reliable we need to flip the event in __perf_event_sync_stat()
3486 value = local64_xchg(&event->count, value); in __perf_event_sync_stat()
3489 swap(event->total_time_enabled, next_event->total_time_enabled); in __perf_event_sync_stat()
3490 swap(event->total_time_running, next_event->total_time_running); in __perf_event_sync_stat()
3495 perf_event_update_userpage(event); in __perf_event_sync_stat()
3502 struct perf_event *event, *next_event; in perf_event_sync_stat() local
3509 event = list_first_entry(&ctx->event_list, in perf_event_sync_stat()
3515 while (&event->event_entry != &ctx->event_list && in perf_event_sync_stat()
3518 __perf_event_sync_stat(event, next_event); in perf_event_sync_stat()
3520 event = list_next_entry(event, event_entry); in perf_event_sync_stat()
3701 * This callback is relevant even to per-cpu events; for example multi event
3747 * We stop each event and update the event value in event->count.
3750 * sets the disabled bit in the control field of event _before_
3751 * accessing the event control register. If an NMI hits, then it will
3752 * not restart the event.
3768 * cgroup event are system-wide mode only in __perf_event_task_sched_out()
3795 static void __heap_add(struct perf_event_min_heap *heap, struct perf_event *event) in __heap_add() argument
3799 if (event) { in __heap_add()
3800 itrs[heap->nr] = event; in __heap_add()
3827 /* Space for per CPU and/or any CPU event iterators. */ in visit_groups_merge()
3891 * Because the userpage is strictly per-event (there is no concept of context,
3897 static inline bool event_update_userpage(struct perf_event *event) in event_update_userpage() argument
3899 if (likely(!atomic_read(&event->mmap_count))) in event_update_userpage()
3902 perf_event_update_time(event); in event_update_userpage()
3903 perf_event_update_userpage(event); in event_update_userpage()
3910 struct perf_event *event; in group_update_userpage() local
3915 for_each_sibling_event(event, group_event) in group_update_userpage()
3916 event_update_userpage(event); in group_update_userpage()
3919 static int merge_sched_in(struct perf_event *event, void *data) in merge_sched_in() argument
3921 struct perf_event_context *ctx = event->ctx; in merge_sched_in()
3924 if (event->state <= PERF_EVENT_STATE_OFF) in merge_sched_in()
3927 if (!event_filter_match(event)) in merge_sched_in()
3930 if (group_can_go_on(event, *can_add_hw)) { in merge_sched_in()
3931 if (!group_sched_in(event, ctx)) in merge_sched_in()
3932 list_add_tail(&event->active_list, get_event_list(event)); in merge_sched_in()
3935 if (event->state == PERF_EVENT_STATE_INACTIVE) { in merge_sched_in()
3937 if (event->attr.pinned) { in merge_sched_in()
3938 perf_cgroup_event_disable(event, ctx); in merge_sched_in()
3939 perf_event_set_state(event, PERF_EVENT_STATE_ERROR); in merge_sched_in()
3943 event->pmu_ctx->rotate_necessary = 1; in merge_sched_in()
3944 cpc = this_cpu_ptr(event->pmu_ctx->pmu->cpu_pmu_context); in merge_sched_in()
3946 group_update_userpage(event); in merge_sched_in()
4087 * We restore the event value and then enable it.
4090 * sets the enabled bit in the control field of event _before_
4091 * accessing the event control register. If an NMI hits, then it will
4092 * keep the event running.
4106 static u64 perf_calculate_period(struct perf_event *event, u64 nsec, u64 count) in perf_calculate_period() argument
4108 u64 frequency = event->attr.sample_freq; in perf_calculate_period()
4182 static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count, bool disable) in perf_adjust_period() argument
4184 struct hw_perf_event *hwc = &event->hw; in perf_adjust_period()
4188 period = perf_calculate_period(event, nsec, count); in perf_adjust_period()
4206 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_period()
4211 event->pmu->start(event, PERF_EF_RELOAD); in perf_adjust_period()
4217 struct perf_event *event; in perf_adjust_freq_unthr_events() local
4222 list_for_each_entry(event, event_list, active_list) { in perf_adjust_freq_unthr_events()
4223 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_adjust_freq_unthr_events()
4227 if (!event_filter_match(event)) in perf_adjust_freq_unthr_events()
4230 hwc = &event->hw; in perf_adjust_freq_unthr_events()
4234 perf_log_throttle(event, 1); in perf_adjust_freq_unthr_events()
4235 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4236 event->pmu->start(event, 0); in perf_adjust_freq_unthr_events()
4239 if (!event->attr.freq || !event->attr.sample_freq) in perf_adjust_freq_unthr_events()
4243 * stop the event and update event->count in perf_adjust_freq_unthr_events()
4245 event->pmu->stop(event, PERF_EF_UPDATE); in perf_adjust_freq_unthr_events()
4247 now = local64_read(&event->count); in perf_adjust_freq_unthr_events()
4252 * restart the event in perf_adjust_freq_unthr_events()
4254 * we have stopped the event so tell that in perf_adjust_freq_unthr_events()
4259 perf_adjust_period(event, period, delta, false); in perf_adjust_freq_unthr_events()
4261 event->pmu->start(event, delta > 0 ? PERF_EF_RELOAD : 0); in perf_adjust_freq_unthr_events()
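perf_calculate_period() and perf_adjust_period() above steer a freq-based event toward attr.sample_freq by recomputing the sampling period from the rate actually observed since the last tick. The idealized relationship is events-per-second divided by the desired samples-per-second; the sketch below shows that relationship only, while the kernel version uses careful fixed-point arithmetic, damping, and clamping that this omits:

/* Given 'count' events observed over 'nsec' nanoseconds, pick a period so
 * that roughly 'sample_freq' samples are taken per second. */
static unsigned long long approx_period(unsigned long long count,
                                        unsigned long long nsec,
                                        unsigned long long sample_freq)
{
        unsigned long long events_per_sec;

        if (!nsec || !sample_freq)
                return 1;

        events_per_sec = (count * 1000000000ULL) / nsec;
        if (events_per_sec < sample_freq)
                return 1;                       /* sample every event */
        return events_per_sec / sample_freq;
}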
4303 * Move @event to the tail of the @ctx's eligible events.
4305 static void rotate_ctx(struct perf_event_context *ctx, struct perf_event *event) in rotate_ctx() argument
4314 perf_event_groups_delete(&ctx->flexible_groups, event); in rotate_ctx()
4315 perf_event_groups_insert(&ctx->flexible_groups, event); in rotate_ctx()
4318 /* pick an event from the flexible_groups to rotate */
4322 struct perf_event *event; in ctx_event_to_rotate() local
4329 /* pick the first active flexible event */ in ctx_event_to_rotate()
4330 event = list_first_entry_or_null(&pmu_ctx->flexible_active, in ctx_event_to_rotate()
4332 if (event) in ctx_event_to_rotate()
4335 /* if no active flexible event, pick the first event */ in ctx_event_to_rotate()
4343 event = __node_2_pe(node); in ctx_event_to_rotate()
4350 event = __node_2_pe(node); in ctx_event_to_rotate()
4357 event = __node_2_pe(node); in ctx_event_to_rotate()
4366 return event; in ctx_event_to_rotate()
4379 * events, thus the event count values are stable. in perf_rotate_context()
4449 static int event_enable_on_exec(struct perf_event *event, in event_enable_on_exec() argument
4452 if (!event->attr.enable_on_exec) in event_enable_on_exec()
4455 event->attr.enable_on_exec = 0; in event_enable_on_exec()
4456 if (event->state >= PERF_EVENT_STATE_INACTIVE) in event_enable_on_exec()
4459 perf_event_set_state(event, PERF_EVENT_STATE_INACTIVE); in event_enable_on_exec()
4473 struct perf_event *event; in perf_event_enable_on_exec() local
4488 list_for_each_entry(event, &ctx->event_list, event_entry) { in perf_event_enable_on_exec()
4489 enabled |= event_enable_on_exec(event, ctx); in perf_event_enable_on_exec()
4490 event_type |= get_event_type(event); in perf_event_enable_on_exec()
4494 * Unclone and reschedule this context if we enabled any event. in perf_event_enable_on_exec()
4509 static void perf_remove_from_owner(struct perf_event *event);
4510 static void perf_event_exit_event(struct perf_event *event,
4520 struct perf_event *event, *next; in perf_event_remove_on_exec() local
4529 list_for_each_entry_safe(event, next, &ctx->event_list, event_entry) { in perf_event_remove_on_exec()
4530 if (!event->attr.remove_on_exec) in perf_event_remove_on_exec()
4533 if (!is_kernel_event(event)) in perf_event_remove_on_exec()
4534 perf_remove_from_owner(event); in perf_event_remove_on_exec()
4538 perf_event_exit_event(event, ctx); in perf_event_remove_on_exec()
4554 struct perf_event *event; member
4561 static int __perf_event_read_cpu(struct perf_event *event, int event_cpu) in __perf_event_read_cpu() argument
4569 if (event->group_caps & PERF_EV_CAP_READ_SCOPE) { in __perf_event_read_cpu()
4570 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(event->pmu->scope, event_cpu); in __perf_event_read_cpu()
4576 if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) { in __perf_event_read_cpu()
4588 * Cross CPU call to read the hardware event
4593 struct perf_event *sub, *event = data->event; in __perf_event_read() local
4594 struct perf_event_context *ctx = event->ctx; in __perf_event_read()
4596 struct pmu *pmu = event->pmu; in __perf_event_read()
4602 * event->count would have been updated to a recent sample in __perf_event_read()
4603 * when the event was scheduled out. in __perf_event_read()
4609 ctx_time_update_event(ctx, event); in __perf_event_read()
4611 perf_event_update_time(event); in __perf_event_read()
4613 perf_event_update_sibling_time(event); in __perf_event_read()
4615 if (event->state != PERF_EVENT_STATE_ACTIVE) in __perf_event_read()
4619 pmu->read(event); in __perf_event_read()
4626 pmu->read(event); in __perf_event_read()
4628 for_each_sibling_event(sub, event) { in __perf_event_read()
4631 * Use sibling's PMU rather than @event's since in __perf_event_read()
4644 static inline u64 perf_event_count(struct perf_event *event, bool self) in perf_event_count() argument
4647 return local64_read(&event->count); in perf_event_count()
4649 return local64_read(&event->count) + atomic64_read(&event->child_count); in perf_event_count()
4652 static void calc_timer_values(struct perf_event *event, in calc_timer_values() argument
4660 ctx_time = perf_event_time_now(event, *now); in calc_timer_values()
4661 __perf_update_times(event, ctx_time, enabled, running); in calc_timer_values()
4665 * NMI-safe method to read a local event, that is an event that
4672 int perf_event_read_local(struct perf_event *event, u64 *value, in perf_event_read_local() argument
4687 * It must not be an event with inherit set, we cannot read in perf_event_read_local()
4690 if (event->attr.inherit) { in perf_event_read_local()
4695 /* If this is a per-task event, it must be for current */ in perf_event_read_local()
4696 if ((event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4697 event->hw.target != current) { in perf_event_read_local()
4703 * Get the event CPU numbers, and adjust them to local if the event is in perf_event_read_local()
4704 * a per-package event that can be read locally in perf_event_read_local()
4706 event_oncpu = __perf_event_read_cpu(event, event->oncpu); in perf_event_read_local()
4707 event_cpu = __perf_event_read_cpu(event, event->cpu); in perf_event_read_local()
4709 /* If this is a per-CPU event, it must be for this CPU */ in perf_event_read_local()
4710 if (!(event->attach_state & PERF_ATTACH_TASK) && in perf_event_read_local()
4716 /* If this is a pinned event it must be running on this CPU */ in perf_event_read_local()
4717 if (event->attr.pinned && event_oncpu != smp_processor_id()) { in perf_event_read_local()
4723 * If the event is currently on this CPU, it's either a per-task event, in perf_event_read_local()
4728 event->pmu->read(event); in perf_event_read_local()
4730 *value = local64_read(&event->count); in perf_event_read_local()
4734 calc_timer_values(event, &__now, &__enabled, &__running); in perf_event_read_local()
4746 static int perf_event_read(struct perf_event *event, bool group) in perf_event_read() argument
4748 enum perf_event_state state = READ_ONCE(event->state); in perf_event_read()
4752 * If event is enabled and currently active on a CPU, update the in perf_event_read()
4753 * value in the event structure: in perf_event_read()
4767 event_cpu = READ_ONCE(event->oncpu); in perf_event_read()
4772 .event = event, in perf_event_read()
4778 event_cpu = __perf_event_read_cpu(event, event_cpu); in perf_event_read()
4784 * If event_cpu isn't a valid CPU it means the event got in perf_event_read()
4785 * scheduled out and that will have updated the event count. in perf_event_read()
4787 * Therefore, either way, we'll have an up-to-date event count in perf_event_read()
4795 struct perf_event_context *ctx = event->ctx; in perf_event_read()
4799 state = event->state; in perf_event_read()
4809 ctx_time_update_event(ctx, event); in perf_event_read()
4811 perf_event_update_time(event); in perf_event_read()
4813 perf_event_update_sibling_time(event); in perf_event_read()
4884 find_get_context(struct task_struct *task, struct perf_event *event) in find_get_context() argument
4892 /* Must be root to operate on a CPU event: */ in find_get_context()
4893 err = perf_allow_cpu(&event->attr); in find_get_context()
4897 cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in find_get_context()
4958 struct perf_event *event) in find_get_pmu_context() argument
4971 cpc = per_cpu_ptr(pmu->cpu_pmu_context, event->cpu); in find_get_pmu_context()
4991 if (event->attach_state & PERF_ATTACH_TASK_DATA) { in find_get_pmu_context()
5084 static void perf_event_free_filter(struct perf_event *event);
5088 struct perf_event *event = container_of(head, typeof(*event), rcu_head); in free_event_rcu() local
5090 if (event->ns) in free_event_rcu()
5091 put_pid_ns(event->ns); in free_event_rcu()
5092 perf_event_free_filter(event); in free_event_rcu()
5093 kmem_cache_free(perf_event_cache, event); in free_event_rcu()
5096 static void ring_buffer_attach(struct perf_event *event,
5099 static void detach_sb_event(struct perf_event *event) in detach_sb_event() argument
5101 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in detach_sb_event()
5104 list_del_rcu(&event->sb_list); in detach_sb_event()
5108 static bool is_sb_event(struct perf_event *event) in is_sb_event() argument
5110 struct perf_event_attr *attr = &event->attr; in is_sb_event()
5112 if (event->parent) in is_sb_event()
5115 if (event->attach_state & PERF_ATTACH_TASK) in is_sb_event()
5127 static void unaccount_pmu_sb_event(struct perf_event *event) in unaccount_pmu_sb_event() argument
5129 if (is_sb_event(event)) in unaccount_pmu_sb_event()
5130 detach_sb_event(event); in unaccount_pmu_sb_event()
5155 static void unaccount_event(struct perf_event *event) in unaccount_event() argument
5159 if (event->parent) in unaccount_event()
5162 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in unaccount_event()
5164 if (event->attr.mmap || event->attr.mmap_data) in unaccount_event()
5166 if (event->attr.build_id) in unaccount_event()
5168 if (event->attr.comm) in unaccount_event()
5170 if (event->attr.namespaces) in unaccount_event()
5172 if (event->attr.cgroup) in unaccount_event()
5174 if (event->attr.task) in unaccount_event()
5176 if (event->attr.freq) in unaccount_event()
5178 if (event->attr.context_switch) { in unaccount_event()
5182 if (is_cgroup_event(event)) in unaccount_event()
5184 if (has_branch_stack(event)) in unaccount_event()
5186 if (event->attr.ksymbol) in unaccount_event()
5188 if (event->attr.bpf_event) in unaccount_event()
5190 if (event->attr.text_poke) in unaccount_event()
5198 unaccount_pmu_sb_event(event); in unaccount_event()
5211 * (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
5221 static int exclusive_event_init(struct perf_event *event) in exclusive_event_init() argument
5223 struct pmu *pmu = event->pmu; in exclusive_event_init()
5236 * Since this is called in perf_event_alloc() path, event::ctx in exclusive_event_init()
5238 * to mean "per-task event", because unlike other attach states it in exclusive_event_init()
5241 if (event->attach_state & PERF_ATTACH_TASK) { in exclusive_event_init()
5252 static void exclusive_event_destroy(struct perf_event *event) in exclusive_event_destroy() argument
5254 struct pmu *pmu = event->pmu; in exclusive_event_destroy()
5260 if (event->attach_state & PERF_ATTACH_TASK) in exclusive_event_destroy()
5276 static bool exclusive_event_installable(struct perf_event *event, in exclusive_event_installable() argument
5280 struct pmu *pmu = event->pmu; in exclusive_event_installable()
5288 if (exclusive_event_match(iter_event, event)) in exclusive_event_installable()
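The exclusive-PMU accounting above hinges on a single signed per-PMU counter (exclusive_cnt): per-task events push it positive, per-CPU events push it negative, and each mode can only be entered while the other is absent. A simplified user-space model of that two-mode counter; the function names are invented, and the kernel uses atomic_inc_unless_negative()/atomic_dec_unless_positive() instead of the explicit CAS loops shown here:

#include <stdatomic.h>
#include <stdbool.h>

/* cnt > 0: owned by per-task events; cnt < 0: owned by per-CPU events;
 * cnt == 0: free. Acquisition fails while the opposite mode holds it. */
static bool try_get_task_mode(atomic_int *cnt)
{
        int v = atomic_load(cnt);

        while (v >= 0) {
                if (atomic_compare_exchange_weak(cnt, &v, v + 1))
                        return true;
        }
        return false;
}

static bool try_get_cpu_mode(atomic_int *cnt)
{
        int v = atomic_load(cnt);

        while (v <= 0) {
                if (atomic_compare_exchange_weak(cnt, &v, v - 1))
                        return true;
        }
        return false;
}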
5295 static void perf_addr_filters_splice(struct perf_event *event,
5298 static void perf_pending_task_sync(struct perf_event *event) in perf_pending_task_sync() argument
5300 struct callback_head *head = &event->pending_task; in perf_pending_task_sync()
5302 if (!event->pending_work) in perf_pending_task_sync()
5309 event->pending_work = 0; in perf_pending_task_sync()
5310 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task_sync()
5315 * All accesses related to the event are within the same RCU section in in perf_pending_task_sync()
5316 * perf_pending_task(). The RCU grace period before the event is freed in perf_pending_task_sync()
5319 rcuwait_wait_event(&event->pending_work_wait, !event->pending_work, TASK_UNINTERRUPTIBLE); in perf_pending_task_sync()
5322 static void _free_event(struct perf_event *event) in _free_event() argument
5324 irq_work_sync(&event->pending_irq); in _free_event()
5325 irq_work_sync(&event->pending_disable_irq); in _free_event()
5326 perf_pending_task_sync(event); in _free_event()
5328 unaccount_event(event); in _free_event()
5330 security_perf_event_free(event); in _free_event()
5332 if (event->rb) { in _free_event()
5334 * Can happen when we close an event with re-directed output. in _free_event()
5339 mutex_lock(&event->mmap_mutex); in _free_event()
5340 ring_buffer_attach(event, NULL); in _free_event()
5341 mutex_unlock(&event->mmap_mutex); in _free_event()
5344 if (is_cgroup_event(event)) in _free_event()
5345 perf_detach_cgroup(event); in _free_event()
5347 if (!event->parent) { in _free_event()
5348 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in _free_event()
5352 perf_event_free_bpf_prog(event); in _free_event()
5353 perf_addr_filters_splice(event, NULL); in _free_event()
5354 kfree(event->addr_filter_ranges); in _free_event()
5356 if (event->destroy) in _free_event()
5357 event->destroy(event); in _free_event()
5363 if (event->hw.target) in _free_event()
5364 put_task_struct(event->hw.target); in _free_event()
5366 if (event->pmu_ctx) in _free_event()
5367 put_pmu_ctx(event->pmu_ctx); in _free_event()
5373 if (event->ctx) in _free_event()
5374 put_ctx(event->ctx); in _free_event()
5376 exclusive_event_destroy(event); in _free_event()
5377 module_put(event->pmu->module); in _free_event()
5379 call_rcu(&event->rcu_head, free_event_rcu); in _free_event()
5384 * where the event isn't exposed yet and inherited events.
5386 static void free_event(struct perf_event *event) in free_event() argument
5388 if (WARN(atomic_long_cmpxchg(&event->refcount, 1, 0) != 1, in free_event()
5389 "unexpected event refcount: %ld; ptr=%p\n", in free_event()
5390 atomic_long_read(&event->refcount), event)) { in free_event()
5395 _free_event(event); in free_event()
5399 * Remove user event from the owner task.
5401 static void perf_remove_from_owner(struct perf_event *event) in perf_remove_from_owner() argument
5409 * indeed free this event, otherwise we need to serialize on in perf_remove_from_owner()
5412 owner = READ_ONCE(event->owner); in perf_remove_from_owner()
5435 * We have to re-check the event->owner field, if it is cleared in perf_remove_from_owner()
5438 * event. in perf_remove_from_owner()
5440 if (event->owner) { in perf_remove_from_owner()
5441 list_del_init(&event->owner_entry); in perf_remove_from_owner()
5442 smp_store_release(&event->owner, NULL); in perf_remove_from_owner()
5449 static void put_event(struct perf_event *event) in put_event() argument
5451 if (!atomic_long_dec_and_test(&event->refcount)) in put_event()
5454 _free_event(event); in put_event()
5458 * Kill an event dead; while event:refcount will preserve the event
5462 int perf_event_release_kernel(struct perf_event *event) in perf_event_release_kernel() argument
5464 struct perf_event_context *ctx = event->ctx; in perf_event_release_kernel()
5469 * If we got here through err_alloc: free_event(event); we will not in perf_event_release_kernel()
5473 WARN_ON_ONCE(event->attach_state & in perf_event_release_kernel()
5478 if (!is_kernel_event(event)) in perf_event_release_kernel()
5479 perf_remove_from_owner(event); in perf_event_release_kernel()
5481 ctx = perf_event_ctx_lock(event); in perf_event_release_kernel()
5485 * Mark this event as STATE_DEAD, there is no external reference to it in perf_event_release_kernel()
5488 * Anybody acquiring event->child_mutex after the below loop _must_ in perf_event_release_kernel()
5495 perf_remove_from_context(event, DETACH_GROUP|DETACH_DEAD); in perf_event_release_kernel()
5497 perf_event_ctx_unlock(event, ctx); in perf_event_release_kernel()
5500 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5501 list_for_each_entry(child, &event->child_list, child_list) { in perf_event_release_kernel()
5513 * Since the event cannot get freed while we hold the in perf_event_release_kernel()
5524 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5526 mutex_lock(&event->child_mutex); in perf_event_release_kernel()
5533 tmp = list_first_entry_or_null(&event->child_list, in perf_event_release_kernel()
5542 put_event(event); in perf_event_release_kernel()
5547 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5562 mutex_unlock(&event->child_mutex); in perf_event_release_kernel()
5571 * Wake any perf_event_free_task() waiting for this event to be in perf_event_release_kernel()
5579 put_event(event); /* Must be the 'last' reference */ in perf_event_release_kernel()
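
For context on the create/release lifecycle that perf_event_release_kernel() above terminates, here is a minimal in-kernel sketch; the attribute values and the my_overflow()/my_counter_*() names are illustrative assumptions, not taken from this file.

#include <linux/err.h>
#include <linux/perf_event.h>

/* Hypothetical overflow callback; signature matches perf_overflow_handler_t. */
static void my_overflow(struct perf_event *event,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	/* e.g. count overflows, kick a worker, ... */
}

static struct perf_event *my_event;

static int my_counter_start(void)
{
	struct perf_event_attr attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.size		= sizeof(attr),
		.sample_period	= 1000000,
		.pinned		= 1,
	};

	/* CPU-bound kernel counter on CPU 0, no target task. */
	my_event = perf_event_create_kernel_counter(&attr, 0, NULL,
						     my_overflow, NULL);
	if (IS_ERR(my_event))
		return PTR_ERR(my_event);
	return 0;
}

static void my_counter_stop(void)
{
	/* Drops the last reference; ends up in the put_event() path above. */
	perf_event_release_kernel(my_event);
}
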
5593 static u64 __perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in __perf_event_read_value() argument
5601 mutex_lock(&event->child_mutex); in __perf_event_read_value()
5603 (void)perf_event_read(event, false); in __perf_event_read_value()
5604 total += perf_event_count(event, false); in __perf_event_read_value()
5606 *enabled += event->total_time_enabled + in __perf_event_read_value()
5607 atomic64_read(&event->child_total_time_enabled); in __perf_event_read_value()
5608 *running += event->total_time_running + in __perf_event_read_value()
5609 atomic64_read(&event->child_total_time_running); in __perf_event_read_value()
5611 list_for_each_entry(child, &event->child_list, child_list) { in __perf_event_read_value()
5617 mutex_unlock(&event->child_mutex); in __perf_event_read_value()
5622 u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) in perf_event_read_value() argument
5627 ctx = perf_event_ctx_lock(event); in perf_event_read_value()
5628 count = __perf_event_read_value(event, enabled, running); in perf_event_read_value()
5629 perf_event_ctx_unlock(event, ctx); in perf_event_read_value()
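
perf_event_read_value() is exported for in-kernel users; a minimal sketch of reading the summed count plus enabled/running times, assuming 'event' came from perf_event_create_kernel_counter() or similar (overflow in the scaling multiply is ignored for brevity):

	u64 enabled, running;
	u64 count = perf_event_read_value(event, &enabled, &running);

	/*
	 * If the PMU was over-committed the event only ran for part of the
	 * time it was enabled (running < enabled); callers typically scale.
	 */
	if (running && running < enabled)
		count = div64_u64(count * enabled, running);
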
5714 static int perf_read_group(struct perf_event *event, in perf_read_group() argument
5717 struct perf_event *leader = event->group_leader, *child; in perf_read_group()
5724 values = kzalloc(event->read_size, GFP_KERNEL); in perf_read_group()
5744 ret = event->read_size; in perf_read_group()
5745 if (copy_to_user(buf, values, event->read_size)) in perf_read_group()
5756 static int perf_read_one(struct perf_event *event, in perf_read_one() argument
5763 values[n++] = __perf_event_read_value(event, &enabled, &running); in perf_read_one()
5769 values[n++] = primary_event_id(event); in perf_read_one()
5771 values[n++] = atomic64_read(&event->lost_samples); in perf_read_one()
5779 static bool is_event_hup(struct perf_event *event) in is_event_hup() argument
5783 if (event->state > PERF_EVENT_STATE_EXIT) in is_event_hup()
5786 mutex_lock(&event->child_mutex); in is_event_hup()
5787 no_children = list_empty(&event->child_list); in is_event_hup()
5788 mutex_unlock(&event->child_mutex); in is_event_hup()
5793 * Read the performance event - simple non-blocking version for now
5796 __perf_read(struct perf_event *event, char __user *buf, size_t count) in __perf_read() argument
5798 u64 read_format = event->attr.read_format; in __perf_read()
5802 * Return end-of-file for a read on an event that is in in __perf_read()
5806 if (event->state == PERF_EVENT_STATE_ERROR) in __perf_read()
5809 if (count < event->read_size) in __perf_read()
5812 WARN_ON_ONCE(event->ctx->parent_ctx); in __perf_read()
5814 ret = perf_read_group(event, read_format, buf); in __perf_read()
5816 ret = perf_read_one(event, read_format, buf); in __perf_read()
5824 struct perf_event *event = file->private_data; in perf_read() local
5828 ret = security_perf_event_read(event); in perf_read()
5832 ctx = perf_event_ctx_lock(event); in perf_read()
5833 ret = __perf_read(event, buf, count); in perf_read()
5834 perf_event_ctx_unlock(event, ctx); in perf_read()
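
From user space, the read_format handled by perf_read_one() above is a flat array of u64 values; a hedged sketch, assuming the event was opened with PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING | PERF_FORMAT_ID and no PERF_FORMAT_GROUP:

#include <stdint.h>
#include <unistd.h>

struct read_value {
	uint64_t value;		/* counter value                   */
	uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED  */
	uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING  */
	uint64_t id;		/* PERF_FORMAT_ID                  */
};

static int read_counter(int perf_fd, struct read_value *rv)
{
	/* __perf_read() returns end-of-file if the event is in ERROR state. */
	ssize_t n = read(perf_fd, rv, sizeof(*rv));

	return n == sizeof(*rv) ? 0 : -1;
}
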
5841 struct perf_event *event = file->private_data; in perf_poll() local
5845 poll_wait(file, &event->waitq, wait); in perf_poll()
5847 if (is_event_hup(event)) in perf_poll()
5851 * Pin the event->rb by taking event->mmap_mutex; otherwise in perf_poll()
5854 mutex_lock(&event->mmap_mutex); in perf_poll()
5855 rb = event->rb; in perf_poll()
5858 mutex_unlock(&event->mmap_mutex); in perf_poll()
5862 static void _perf_event_reset(struct perf_event *event) in _perf_event_reset() argument
5864 (void)perf_event_read(event, false); in _perf_event_reset()
5865 local64_set(&event->count, 0); in _perf_event_reset()
5866 perf_event_update_userpage(event); in _perf_event_reset()
5869 /* Assume it's not an event with inherit set. */
5870 u64 perf_event_pause(struct perf_event *event, bool reset) in perf_event_pause() argument
5875 ctx = perf_event_ctx_lock(event); in perf_event_pause()
5876 WARN_ON_ONCE(event->attr.inherit); in perf_event_pause()
5877 _perf_event_disable(event); in perf_event_pause()
5878 count = local64_read(&event->count); in perf_event_pause()
5880 local64_set(&event->count, 0); in perf_event_pause()
5881 perf_event_ctx_unlock(event, ctx); in perf_event_pause()
5888 * Holding the top-level event's child_mutex means that any
5889 * descendant process that has inherited this event will block
5893 static void perf_event_for_each_child(struct perf_event *event, in perf_event_for_each_child() argument
5898 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_event_for_each_child()
5900 mutex_lock(&event->child_mutex); in perf_event_for_each_child()
5901 func(event); in perf_event_for_each_child()
5902 list_for_each_entry(child, &event->child_list, child_list) in perf_event_for_each_child()
5904 mutex_unlock(&event->child_mutex); in perf_event_for_each_child()
5907 static void perf_event_for_each(struct perf_event *event, in perf_event_for_each() argument
5910 struct perf_event_context *ctx = event->ctx; in perf_event_for_each()
5915 event = event->group_leader; in perf_event_for_each()
5917 perf_event_for_each_child(event, func); in perf_event_for_each()
5918 for_each_sibling_event(sibling, event) in perf_event_for_each()
5922 static void __perf_event_period(struct perf_event *event, in __perf_event_period() argument
5930 if (event->attr.freq) { in __perf_event_period()
5931 event->attr.sample_freq = value; in __perf_event_period()
5933 event->attr.sample_period = value; in __perf_event_period()
5934 event->hw.sample_period = value; in __perf_event_period()
5937 active = (event->state == PERF_EVENT_STATE_ACTIVE); in __perf_event_period()
5939 perf_pmu_disable(event->pmu); in __perf_event_period()
5942 * trying to unthrottle while we already re-started the event. in __perf_event_period()
5944 if (event->hw.interrupts == MAX_INTERRUPTS) { in __perf_event_period()
5945 event->hw.interrupts = 0; in __perf_event_period()
5946 perf_log_throttle(event, 1); in __perf_event_period()
5948 event->pmu->stop(event, PERF_EF_UPDATE); in __perf_event_period()
5951 local64_set(&event->hw.period_left, 0); in __perf_event_period()
5954 event->pmu->start(event, PERF_EF_RELOAD); in __perf_event_period()
5955 perf_pmu_enable(event->pmu); in __perf_event_period()
5959 static int perf_event_check_period(struct perf_event *event, u64 value) in perf_event_check_period() argument
5961 return event->pmu->check_period(event, value); in perf_event_check_period()
5964 static int _perf_event_period(struct perf_event *event, u64 value) in _perf_event_period() argument
5966 if (!is_sampling_event(event)) in _perf_event_period()
5972 if (event->attr.freq && value > sysctl_perf_event_sample_rate) in _perf_event_period()
5975 if (perf_event_check_period(event, value)) in _perf_event_period()
5978 if (!event->attr.freq && (value & (1ULL << 63))) in _perf_event_period()
5981 event_function_call(event, __perf_event_period, &value); in _perf_event_period()
5986 int perf_event_period(struct perf_event *event, u64 value) in perf_event_period() argument
5991 ctx = perf_event_ctx_lock(event); in perf_event_period()
5992 ret = _perf_event_period(event, value); in perf_event_period()
5993 perf_event_ctx_unlock(event, ctx); in perf_event_period()
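
perf_event_period() and perf_event_pause() above are likewise exported for kernel users (e.g. virtualization); a minimal sketch, assuming 'event' is an in-kernel sampling event:

#include <linux/perf_event.h>
#include <linux/printk.h>

static void retune_event(struct perf_event *event)
{
	u64 snapshot;

	/* Rejects non-sampling events and out-of-range values. */
	if (perf_event_period(event, 4096))
		pr_warn("failed to update sample period\n");

	/*
	 * Stop counting, return the current value and (with reset=true)
	 * zero it; not meant for events created with attr.inherit set.
	 */
	snapshot = perf_event_pause(event, true);
	(void)snapshot;
}
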
6015 static int perf_event_set_output(struct perf_event *event,
6017 static int perf_event_set_filter(struct perf_event *event, void __user *arg);
6021 static long _perf_ioctl(struct perf_event *event, unsigned int cmd, unsigned long arg) in _perf_ioctl() argument
6038 return _perf_event_refresh(event, arg); in _perf_ioctl()
6047 return _perf_event_period(event, value); in _perf_ioctl()
6051 u64 id = primary_event_id(event); in _perf_ioctl()
6068 ret = perf_event_set_output(event, output_event); in _perf_ioctl()
6071 ret = perf_event_set_output(event, NULL); in _perf_ioctl()
6077 return perf_event_set_filter(event, (void __user *)arg); in _perf_ioctl()
6088 err = perf_event_set_bpf_prog(event, prog, 0); in _perf_ioctl()
6101 rb = rcu_dereference(event->rb); in _perf_ioctl()
6112 return perf_event_query_prog_array(event, (void __user *)arg); in _perf_ioctl()
6122 return perf_event_modify_attr(event, &new_attr); in _perf_ioctl()
6129 perf_event_for_each(event, func); in _perf_ioctl()
6131 perf_event_for_each_child(event, func); in _perf_ioctl()
6138 struct perf_event *event = file->private_data; in perf_ioctl() local
6143 ret = security_perf_event_write(event); in perf_ioctl()
6147 ctx = perf_event_ctx_lock(event); in perf_ioctl()
6148 ret = _perf_ioctl(event, cmd, arg); in perf_ioctl()
6149 perf_event_ctx_unlock(event, ctx); in perf_ioctl()
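
The same operations are reachable from user space through the ioctls dispatched by _perf_ioctl() above; a brief sketch over an assumed perf_event_open() file descriptor:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/perf_event.h>

static void retune_fd(int perf_fd)
{
	uint64_t period = 4096;

	ioctl(perf_fd, PERF_EVENT_IOC_RESET, 0);	/* _perf_event_reset()  */
	ioctl(perf_fd, PERF_EVENT_IOC_PERIOD, &period);	/* _perf_event_period() */
	ioctl(perf_fd, PERF_EVENT_IOC_ENABLE, 0);	/* _perf_event_enable() */

	/* PERF_IOC_FLAG_GROUP routes through perf_event_for_each() and so
	 * applies the operation to the group leader and all its siblings. */
	ioctl(perf_fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
}
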
6179 struct perf_event *event; in perf_event_task_enable() local
6182 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_enable()
6183 ctx = perf_event_ctx_lock(event); in perf_event_task_enable()
6184 perf_event_for_each_child(event, _perf_event_enable); in perf_event_task_enable()
6185 perf_event_ctx_unlock(event, ctx); in perf_event_task_enable()
6195 struct perf_event *event; in perf_event_task_disable() local
6198 list_for_each_entry(event, &current->perf_event_list, owner_entry) { in perf_event_task_disable()
6199 ctx = perf_event_ctx_lock(event); in perf_event_task_disable()
6200 perf_event_for_each_child(event, _perf_event_disable); in perf_event_task_disable()
6201 perf_event_ctx_unlock(event, ctx); in perf_event_task_disable()
6208 static int perf_event_index(struct perf_event *event) in perf_event_index() argument
6210 if (event->hw.state & PERF_HES_STOPPED) in perf_event_index()
6213 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_event_index()
6216 return event->pmu->event_idx(event); in perf_event_index()
6219 static void perf_event_init_userpage(struct perf_event *event) in perf_event_init_userpage() argument
6225 rb = rcu_dereference(event->rb); in perf_event_init_userpage()
6242 struct perf_event *event, struct perf_event_mmap_page *userpg, u64 now) in arch_perf_update_userpage() argument
6251 void perf_event_update_userpage(struct perf_event *event) in perf_event_update_userpage() argument
6258 rb = rcu_dereference(event->rb); in perf_event_update_userpage()
6264 * based on snapshot values taken when the event in perf_event_update_userpage()
6271 calc_timer_values(event, &now, &enabled, &running); in perf_event_update_userpage()
6281 userpg->index = perf_event_index(event); in perf_event_update_userpage()
6282 userpg->offset = perf_event_count(event, false); in perf_event_update_userpage()
6284 userpg->offset -= local64_read(&event->hw.prev_count); in perf_event_update_userpage()
6287 atomic64_read(&event->child_total_time_enabled); in perf_event_update_userpage()
6290 atomic64_read(&event->child_total_time_running); in perf_event_update_userpage()
6292 arch_perf_update_userpage(event, userpg, now); in perf_event_update_userpage()
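
The values published by perf_event_update_userpage() are meant to be read from user space under the mmap page's seqlock; a hedged sketch of the documented read pattern, omitting the rdpmc and time-delta fast paths (so the values are snapshots as of the last kernel update):

#include <stdint.h>
#include <linux/perf_event.h>

/* pc points at the mmap'ed metadata page of the event's ring buffer. */
static void read_userpage(volatile struct perf_event_mmap_page *pc,
			  uint64_t *count, uint64_t *enabled, uint64_t *running)
{
	uint32_t seq;

	do {
		seq = pc->lock;
		__sync_synchronize();	/* pairs with the kernel's barriers */

		/* If pc->index != 0, an rdpmc(pc->index - 1) delta still
		 * needs to be added to obtain the live count. */
		*count   = (uint64_t)pc->offset;
		*enabled = pc->time_enabled;
		*running = pc->time_running;

		__sync_synchronize();
	} while (pc->lock != seq);
}
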
6304 struct perf_event *event = vmf->vma->vm_file->private_data; in perf_mmap_fault() local
6315 rb = rcu_dereference(event->rb); in perf_mmap_fault()
6337 static void ring_buffer_attach(struct perf_event *event, in ring_buffer_attach() argument
6343 WARN_ON_ONCE(event->parent); in ring_buffer_attach()
6345 if (event->rb) { in ring_buffer_attach()
6348 * event->rb_entry and wait/clear when adding event->rb_entry. in ring_buffer_attach()
6350 WARN_ON_ONCE(event->rcu_pending); in ring_buffer_attach()
6352 old_rb = event->rb; in ring_buffer_attach()
6354 list_del_rcu(&event->rb_entry); in ring_buffer_attach()
6357 event->rcu_batches = get_state_synchronize_rcu(); in ring_buffer_attach()
6358 event->rcu_pending = 1; in ring_buffer_attach()
6362 if (event->rcu_pending) { in ring_buffer_attach()
6363 cond_synchronize_rcu(event->rcu_batches); in ring_buffer_attach()
6364 event->rcu_pending = 0; in ring_buffer_attach()
6368 list_add_rcu(&event->rb_entry, &rb->event_list); in ring_buffer_attach()
6373 * Avoid racing with perf_mmap_close(AUX): stop the event in ring_buffer_attach()
6374 * before swizzling the event::rb pointer; if it's getting in ring_buffer_attach()
6382 if (has_aux(event)) in ring_buffer_attach()
6383 perf_event_stop(event, 0); in ring_buffer_attach()
6385 rcu_assign_pointer(event->rb, rb); in ring_buffer_attach()
6394 wake_up_all(&event->waitq); in ring_buffer_attach()
6398 static void ring_buffer_wakeup(struct perf_event *event) in ring_buffer_wakeup() argument
6402 if (event->parent) in ring_buffer_wakeup()
6403 event = event->parent; in ring_buffer_wakeup()
6406 rb = rcu_dereference(event->rb); in ring_buffer_wakeup()
6408 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) in ring_buffer_wakeup()
6409 wake_up_all(&event->waitq); in ring_buffer_wakeup()
6414 struct perf_buffer *ring_buffer_get(struct perf_event *event) in ring_buffer_get() argument
6418 if (event->parent) in ring_buffer_get()
6419 event = event->parent; in ring_buffer_get()
6422 rb = rcu_dereference(event->rb); in ring_buffer_get()
6444 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_open() local
6446 atomic_inc(&event->mmap_count); in perf_mmap_open()
6447 atomic_inc(&event->rb->mmap_count); in perf_mmap_open()
6450 atomic_inc(&event->rb->aux_mmap_count); in perf_mmap_open()
6452 if (event->pmu->event_mapped) in perf_mmap_open()
6453 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap_open()
6456 static void perf_pmu_output_stop(struct perf_event *event);
6460 * event, or through other events by use of perf_event_set_output().
6468 struct perf_event *event = vma->vm_file->private_data; in perf_mmap_close() local
6469 struct perf_buffer *rb = ring_buffer_get(event); in perf_mmap_close()
6475 if (event->pmu->event_unmapped) in perf_mmap_close()
6476 event->pmu->event_unmapped(event, vma->vm_mm); in perf_mmap_close()
6490 perf_pmu_output_stop(event); in perf_mmap_close()
6506 if (!atomic_dec_and_mutex_lock(&event->mmap_count, &event->mmap_mutex)) in perf_mmap_close()
6509 ring_buffer_attach(event, NULL); in perf_mmap_close()
6510 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6523 list_for_each_entry_rcu(event, &rb->event_list, rb_entry) { in perf_mmap_close()
6524 if (!atomic_long_inc_not_zero(&event->refcount)) { in perf_mmap_close()
6526 * This event is en-route to free_event() which will in perf_mmap_close()
6533 mutex_lock(&event->mmap_mutex); in perf_mmap_close()
6539 * If we find a different rb; ignore this event, a next in perf_mmap_close()
6544 if (event->rb == rb) in perf_mmap_close()
6545 ring_buffer_attach(event, NULL); in perf_mmap_close()
6547 mutex_unlock(&event->mmap_mutex); in perf_mmap_close()
6548 put_event(event); in perf_mmap_close()
6585 struct perf_event *event = file->private_data; in perf_mmap() local
6601 if (event->cpu == -1 && event->attr.inherit) in perf_mmap()
6607 ret = security_perf_event_read(event); in perf_mmap()
6623 if (!event->rb) in perf_mmap()
6630 mutex_lock(&event->mmap_mutex); in perf_mmap()
6633 rb = event->rb; in perf_mmap()
6688 WARN_ON_ONCE(event->ctx->parent_ctx); in perf_mmap()
6690 mutex_lock(&event->mmap_mutex); in perf_mmap()
6691 if (event->rb) { in perf_mmap()
6692 if (data_page_nr(event->rb) != nr_pages) { in perf_mmap()
6697 if (!atomic_inc_not_zero(&event->rb->mmap_count)) { in perf_mmap()
6700 * event and try again. in perf_mmap()
6702 ring_buffer_attach(event, NULL); in perf_mmap()
6703 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6749 WARN_ON(!rb && event->rb); in perf_mmap()
6756 event->attr.watermark ? event->attr.wakeup_watermark : 0, in perf_mmap()
6757 event->cpu, flags); in perf_mmap()
6768 ring_buffer_attach(event, rb); in perf_mmap()
6770 perf_event_update_time(event); in perf_mmap()
6771 perf_event_init_userpage(event); in perf_mmap()
6772 perf_event_update_userpage(event); in perf_mmap()
6774 ret = rb_alloc_aux(rb, event, vma->vm_pgoff, nr_pages, in perf_mmap()
6775 event->attr.aux_watermark, flags); in perf_mmap()
6785 atomic_inc(&event->mmap_count); in perf_mmap()
6792 mutex_unlock(&event->mmap_mutex); in perf_mmap()
6801 if (event->pmu->event_mapped) in perf_mmap()
6802 event->pmu->event_mapped(event, vma->vm_mm); in perf_mmap()
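
perf_mmap() above expects the metadata page plus a power-of-two number of data pages; a minimal user-space sketch (mmap() returns MAP_FAILED on error):

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

/* One metadata page plus 2^n data pages; here n = 4 (16 data pages). */
static void *map_ring_buffer(int perf_fd, size_t *len)
{
	long page_size = sysconf(_SC_PAGESIZE);

	*len = (1 + 16) * page_size;
	return mmap(NULL, *len, PROT_READ | PROT_WRITE, MAP_SHARED, perf_fd, 0);
}
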
6810 struct perf_event *event = filp->private_data; in perf_fasync() local
6814 retval = fasync_helper(fd, filp, on, &event->fasync); in perf_fasync()
6834 * Perf event wakeup
6840 void perf_event_wakeup(struct perf_event *event) in perf_event_wakeup() argument
6842 ring_buffer_wakeup(event); in perf_event_wakeup()
6844 if (event->pending_kill) { in perf_event_wakeup()
6845 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill); in perf_event_wakeup()
6846 event->pending_kill = 0; in perf_event_wakeup()
6850 static void perf_sigtrap(struct perf_event *event) in perf_sigtrap() argument
6857 if (WARN_ON_ONCE(event->ctx->task != current)) in perf_sigtrap()
6867 send_sig_perf((void __user *)event->pending_addr, in perf_sigtrap()
6868 event->orig_type, event->attr.sig_data); in perf_sigtrap()
6872 * Deliver the pending work in-event-context or follow the context.
6874 static void __perf_pending_disable(struct perf_event *event) in __perf_pending_disable() argument
6876 int cpu = READ_ONCE(event->oncpu); in __perf_pending_disable()
6879 * If the event isn't running, we're done. event_sched_out() will have in __perf_pending_disable()
6886 * Yay, we hit home and are in the context of the event. in __perf_pending_disable()
6889 if (event->pending_disable) { in __perf_pending_disable()
6890 event->pending_disable = 0; in __perf_pending_disable()
6891 perf_event_disable_local(event); in __perf_pending_disable()
6914 * But the event runs on CPU-B and wants disabling there. in __perf_pending_disable()
6916 irq_work_queue_on(&event->pending_disable_irq, cpu); in __perf_pending_disable()
6921 struct perf_event *event = container_of(entry, struct perf_event, pending_disable_irq); in perf_pending_disable() local
6929 __perf_pending_disable(event); in perf_pending_disable()
6936 struct perf_event *event = container_of(entry, struct perf_event, pending_irq); in perf_pending_irq() local
6946 * The wakeup isn't bound to the context of the event -- it can happen in perf_pending_irq()
6947 * irrespective of where the event is. in perf_pending_irq()
6949 if (event->pending_wakeup) { in perf_pending_irq()
6950 event->pending_wakeup = 0; in perf_pending_irq()
6951 perf_event_wakeup(event); in perf_pending_irq()
6960 struct perf_event *event = container_of(head, struct perf_event, pending_task); in perf_pending_task() local
6964 * All accesses to the event must belong to the same implicit RCU read-side in perf_pending_task()
6975 if (event->pending_work) { in perf_pending_task()
6976 event->pending_work = 0; in perf_pending_task()
6977 perf_sigtrap(event); in perf_pending_task()
6978 local_dec(&event->ctx->nr_no_switch_fast); in perf_pending_task()
6979 rcuwait_wake_up(&event->pending_work_wait); in perf_pending_task()
7158 static unsigned long perf_prepare_sample_aux(struct perf_event *event, in perf_prepare_sample_aux() argument
7162 struct perf_event *sampler = event->aux_event; in perf_prepare_sample_aux()
7197 struct perf_event *event, in perf_pmu_snapshot_aux() argument
7207 * the IRQ ones, that is, for example, re-starting an event that's just in perf_pmu_snapshot_aux()
7209 * doesn't change the event state. in perf_pmu_snapshot_aux()
7221 ret = event->pmu->snapshot_aux(event, handle, size); in perf_pmu_snapshot_aux()
7230 static void perf_aux_sample_output(struct perf_event *event, in perf_aux_sample_output() argument
7234 struct perf_event *sampler = event->aux_event; in perf_aux_sample_output()
7276 * when event->attr.sample_id_all is set.
7283 struct perf_event *event, in __perf_event_header__init_id() argument
7286 data->type = event->attr.sample_type; in __perf_event_header__init_id()
7291 data->tid_entry.pid = perf_event_pid(event, current); in __perf_event_header__init_id()
7292 data->tid_entry.tid = perf_event_tid(event, current); in __perf_event_header__init_id()
7296 data->time = perf_event_clock(event); in __perf_event_header__init_id()
7299 data->id = primary_event_id(event); in __perf_event_header__init_id()
7302 data->stream_id = event->id; in __perf_event_header__init_id()
7312 struct perf_event *event) in perf_event_header__init_id() argument
7314 if (event->attr.sample_id_all) { in perf_event_header__init_id()
7315 header->size += event->id_header_size; in perf_event_header__init_id()
7316 __perf_event_header__init_id(data, event, event->attr.sample_type); in perf_event_header__init_id()
7344 void perf_event__output_id_sample(struct perf_event *event, in perf_event__output_id_sample() argument
7348 if (event->attr.sample_id_all) in perf_event__output_id_sample()
7353 struct perf_event *event, in perf_output_read_one() argument
7356 u64 read_format = event->attr.read_format; in perf_output_read_one()
7360 values[n++] = perf_event_count(event, has_inherit_and_sample_read(&event->attr)); in perf_output_read_one()
7363 atomic64_read(&event->child_total_time_enabled); in perf_output_read_one()
7367 atomic64_read(&event->child_total_time_running); in perf_output_read_one()
7370 values[n++] = primary_event_id(event); in perf_output_read_one()
7372 values[n++] = atomic64_read(&event->lost_samples); in perf_output_read_one()
7378 struct perf_event *event, in perf_output_read_group() argument
7381 struct perf_event *leader = event->group_leader, *sub; in perf_output_read_group()
7382 u64 read_format = event->attr.read_format; in perf_output_read_group()
7386 bool self = has_inherit_and_sample_read(&event->attr); in perf_output_read_group()
7402 if ((leader != event) && in perf_output_read_group()
7417 if ((sub != event) && in perf_output_read_group()
7448 struct perf_event *event) in perf_output_read() argument
7451 u64 read_format = event->attr.read_format; in perf_output_read()
7455 * based on snapshot values taken when the event in perf_output_read()
7463 calc_timer_values(event, &now, &enabled, &running); in perf_output_read()
7465 if (event->attr.read_format & PERF_FORMAT_GROUP) in perf_output_read()
7466 perf_output_read_group(handle, event, enabled, running); in perf_output_read()
7468 perf_output_read_one(handle, event, enabled, running); in perf_output_read()
7474 struct perf_event *event) in perf_output_sample() argument
7508 perf_output_read(handle, event); in perf_output_sample()
7559 if (branch_sample_hw_index(event)) in perf_output_sample()
7589 u64 mask = event->attr.sample_regs_user; in perf_output_sample()
7620 u64 mask = event->attr.sample_regs_intr; in perf_output_sample()
7644 perf_aux_sample_output(event, handle, data); in perf_output_sample()
7647 if (!event->attr.watermark) { in perf_output_sample()
7648 int wakeup_events = event->attr.wakeup_events; in perf_output_sample()
7791 perf_callchain(struct perf_event *event, struct pt_regs *regs) in perf_callchain() argument
7793 bool kernel = !event->attr.exclude_callchain_kernel; in perf_callchain()
7794 bool user = !event->attr.exclude_callchain_user; in perf_callchain()
7796 bool crosstask = event->ctx->task && event->ctx->task != current; in perf_callchain()
7797 const u32 max_stack = event->attr.sample_max_stack; in perf_callchain()
7814 struct perf_event *event, in perf_prepare_sample() argument
7817 u64 sample_type = event->attr.sample_type; in perf_prepare_sample()
7835 data->type = event->attr.sample_type; in perf_prepare_sample()
7839 __perf_event_header__init_id(data, event, filtered_sample_type); in perf_prepare_sample()
7847 perf_sample_save_callchain(data, event, regs); in perf_prepare_sample()
7874 u64 mask = event->attr.sample_regs_user; in perf_prepare_sample()
7889 u16 stack_size = event->attr.sample_stack_user; in perf_prepare_sample()
7890 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7936 u64 mask = event->attr.sample_regs_intr; in perf_prepare_sample()
7978 u16 header_size = perf_sample_data_size(data, event); in perf_prepare_sample()
7989 event->attr.aux_sample_size); in perf_prepare_sample()
7991 size = perf_prepare_sample_aux(event, data, size); in perf_prepare_sample()
8001 struct perf_event *event, in perf_prepare_header() argument
8005 header->size = perf_sample_data_size(data, event); in perf_prepare_header()
8020 __perf_event_output(struct perf_event *event, in __perf_event_output() argument
8035 perf_prepare_sample(data, event, regs); in __perf_event_output()
8036 perf_prepare_header(&header, data, event, regs); in __perf_event_output()
8038 err = output_begin(&handle, data, event, header.size); in __perf_event_output()
8042 perf_output_sample(&handle, &header, data, event); in __perf_event_output()
8052 perf_event_output_forward(struct perf_event *event, in perf_event_output_forward() argument
8056 __perf_event_output(event, data, regs, perf_output_begin_forward); in perf_event_output_forward()
8060 perf_event_output_backward(struct perf_event *event, in perf_event_output_backward() argument
8064 __perf_event_output(event, data, regs, perf_output_begin_backward); in perf_event_output_backward()
8068 perf_event_output(struct perf_event *event, in perf_event_output() argument
8072 return __perf_event_output(event, data, regs, perf_output_begin); in perf_event_output()
8087 perf_event_read_event(struct perf_event *event, in perf_event_read_event() argument
8096 .size = sizeof(read_event) + event->read_size, in perf_event_read_event()
8098 .pid = perf_event_pid(event, task), in perf_event_read_event()
8099 .tid = perf_event_tid(event, task), in perf_event_read_event()
8103 perf_event_header__init_id(&read_event.header, &sample, event); in perf_event_read_event()
8104 ret = perf_output_begin(&handle, &sample, event, read_event.header.size); in perf_event_read_event()
8109 perf_output_read(&handle, event); in perf_event_read_event()
8110 perf_event__output_id_sample(event, &handle, &sample); in perf_event_read_event()
8115 typedef void (perf_iterate_f)(struct perf_event *event, void *data);
8122 struct perf_event *event; in perf_iterate_ctx() local
8124 list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { in perf_iterate_ctx()
8126 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_ctx()
8128 if (!event_filter_match(event)) in perf_iterate_ctx()
8132 output(event, data); in perf_iterate_ctx()
8139 struct perf_event *event; in perf_iterate_sb_cpu() local
8141 list_for_each_entry_rcu(event, &pel->list, sb_list) { in perf_iterate_sb_cpu()
8144 * if we observe event->ctx, both event and ctx will be in perf_iterate_sb_cpu()
8147 if (!smp_load_acquire(&event->ctx)) in perf_iterate_sb_cpu()
8150 if (event->state < PERF_EVENT_STATE_INACTIVE) in perf_iterate_sb_cpu()
8152 if (!event_filter_match(event)) in perf_iterate_sb_cpu()
8154 output(event, data); in perf_iterate_sb_cpu()
8162 * your event, otherwise it might not get delivered.
8197 static void perf_event_addr_filters_exec(struct perf_event *event, void *data) in perf_event_addr_filters_exec() argument
8199 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_exec()
8204 if (!has_addr_filter(event)) in perf_event_addr_filters_exec()
8210 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_exec()
8211 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_exec()
8219 event->addr_filters_gen++; in perf_event_addr_filters_exec()
8223 perf_event_stop(event, 1); in perf_event_addr_filters_exec()
8247 static void __perf_event_output_stop(struct perf_event *event, void *data) in __perf_event_output_stop() argument
8249 struct perf_event *parent = event->parent; in __perf_event_output_stop()
8253 .event = event, in __perf_event_output_stop()
8256 if (!has_aux(event)) in __perf_event_output_stop()
8260 parent = event; in __perf_event_output_stop()
8266 * We are using event::rb to determine if the event should be stopped, in __perf_event_output_stop()
8268 * which will make us skip the event that actually needs to be stopped. in __perf_event_output_stop()
8269 * So ring_buffer_attach() has to stop an aux event before re-assigning in __perf_event_output_stop()
8278 struct perf_event *event = info; in __perf_pmu_output_stop() local
8281 .rb = event->rb, in __perf_pmu_output_stop()
8294 static void perf_pmu_output_stop(struct perf_event *event) in perf_pmu_output_stop() argument
8301 list_for_each_entry_rcu(iter, &event->rb->event_list, rb_entry) { in perf_pmu_output_stop()
8305 * sufficient to stop the event itself if it's active, since in perf_pmu_output_stop()
8315 err = cpu_function_call(cpu, __perf_pmu_output_stop, event); in perf_pmu_output_stop()
8345 static int perf_event_task_match(struct perf_event *event) in perf_event_task_match() argument
8347 return event->attr.comm || event->attr.mmap || in perf_event_task_match()
8348 event->attr.mmap2 || event->attr.mmap_data || in perf_event_task_match()
8349 event->attr.task; in perf_event_task_match()
8352 static void perf_event_task_output(struct perf_event *event, in perf_event_task_output() argument
8361 if (!perf_event_task_match(event)) in perf_event_task_output()
8364 perf_event_header__init_id(&task_event->event_id.header, &sample, event); in perf_event_task_output()
8366 ret = perf_output_begin(&handle, &sample, event, in perf_event_task_output()
8371 task_event->event_id.pid = perf_event_pid(event, task); in perf_event_task_output()
8372 task_event->event_id.tid = perf_event_tid(event, task); in perf_event_task_output()
8375 task_event->event_id.ppid = perf_event_pid(event, in perf_event_task_output()
8377 task_event->event_id.ptid = perf_event_pid(event, in perf_event_task_output()
8380 task_event->event_id.ppid = perf_event_pid(event, current); in perf_event_task_output()
8381 task_event->event_id.ptid = perf_event_tid(event, current); in perf_event_task_output()
8384 task_event->event_id.time = perf_event_clock(event); in perf_event_task_output()
8388 perf_event__output_id_sample(event, &handle, &sample); in perf_event_task_output()
8451 static int perf_event_comm_match(struct perf_event *event) in perf_event_comm_match() argument
8453 return event->attr.comm; in perf_event_comm_match()
8456 static void perf_event_comm_output(struct perf_event *event, in perf_event_comm_output() argument
8465 if (!perf_event_comm_match(event)) in perf_event_comm_output()
8468 perf_event_header__init_id(&comm_event->event_id.header, &sample, event); in perf_event_comm_output()
8469 ret = perf_output_begin(&handle, &sample, event, in perf_event_comm_output()
8475 comm_event->event_id.pid = perf_event_pid(event, comm_event->task); in perf_event_comm_output()
8476 comm_event->event_id.tid = perf_event_tid(event, comm_event->task); in perf_event_comm_output()
8482 perf_event__output_id_sample(event, &handle, &sample); in perf_event_comm_output()
8550 static int perf_event_namespaces_match(struct perf_event *event) in perf_event_namespaces_match() argument
8552 return event->attr.namespaces; in perf_event_namespaces_match()
8555 static void perf_event_namespaces_output(struct perf_event *event, in perf_event_namespaces_output() argument
8564 if (!perf_event_namespaces_match(event)) in perf_event_namespaces_output()
8568 &sample, event); in perf_event_namespaces_output()
8569 ret = perf_output_begin(&handle, &sample, event, in perf_event_namespaces_output()
8574 namespaces_event->event_id.pid = perf_event_pid(event, in perf_event_namespaces_output()
8576 namespaces_event->event_id.tid = perf_event_tid(event, in perf_event_namespaces_output()
8581 perf_event__output_id_sample(event, &handle, &sample); in perf_event_namespaces_output()
8678 static int perf_event_cgroup_match(struct perf_event *event) in perf_event_cgroup_match() argument
8680 return event->attr.cgroup; in perf_event_cgroup_match()
8683 static void perf_event_cgroup_output(struct perf_event *event, void *data) in perf_event_cgroup_output() argument
8691 if (!perf_event_cgroup_match(event)) in perf_event_cgroup_output()
8695 &sample, event); in perf_event_cgroup_output()
8696 ret = perf_output_begin(&handle, &sample, event, in perf_event_cgroup_output()
8704 perf_event__output_id_sample(event, &handle, &sample); in perf_event_cgroup_output()
8789 static int perf_event_mmap_match(struct perf_event *event, in perf_event_mmap_match() argument
8796 return (!executable && event->attr.mmap_data) || in perf_event_mmap_match()
8797 (executable && (event->attr.mmap || event->attr.mmap2)); in perf_event_mmap_match()
8800 static void perf_event_mmap_output(struct perf_event *event, in perf_event_mmap_output() argument
8811 if (!perf_event_mmap_match(event, data)) in perf_event_mmap_output()
8814 if (event->attr.mmap2) { in perf_event_mmap_output()
8824 perf_event_header__init_id(&mmap_event->event_id.header, &sample, event); in perf_event_mmap_output()
8825 ret = perf_output_begin(&handle, &sample, event, in perf_event_mmap_output()
8830 mmap_event->event_id.pid = perf_event_pid(event, current); in perf_event_mmap_output()
8831 mmap_event->event_id.tid = perf_event_tid(event, current); in perf_event_mmap_output()
8833 use_build_id = event->attr.build_id && mmap_event->build_id_size; in perf_event_mmap_output()
8835 if (event->attr.mmap2 && use_build_id) in perf_event_mmap_output()
8840 if (event->attr.mmap2) { in perf_event_mmap_output()
8859 perf_event__output_id_sample(event, &handle, &sample); in perf_event_mmap_output()
9020 static void __perf_addr_filters_adjust(struct perf_event *event, void *data) in __perf_addr_filters_adjust() argument
9022 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in __perf_addr_filters_adjust()
9028 if (!has_addr_filter(event)) in __perf_addr_filters_adjust()
9037 &event->addr_filter_ranges[count])) in __perf_addr_filters_adjust()
9044 event->addr_filters_gen++; in __perf_addr_filters_adjust()
9048 perf_event_stop(event, 1); in __perf_addr_filters_adjust()
9107 void perf_event_aux_event(struct perf_event *event, unsigned long head, in perf_event_aux_event() argument
9129 perf_event_header__init_id(&rec.header, &sample, event); in perf_event_aux_event()
9130 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_event_aux_event()
9136 perf_event__output_id_sample(event, &handle, &sample); in perf_event_aux_event()
9144 void perf_log_lost_samples(struct perf_event *event, u64 lost) in perf_log_lost_samples() argument
9162 perf_event_header__init_id(&lost_samples_event.header, &sample, event); in perf_log_lost_samples()
9164 ret = perf_output_begin(&handle, &sample, event, in perf_log_lost_samples()
9170 perf_event__output_id_sample(event, &handle, &sample); in perf_log_lost_samples()
9189 static int perf_event_switch_match(struct perf_event *event) in perf_event_switch_match() argument
9191 return event->attr.context_switch; in perf_event_switch_match()
9194 static void perf_event_switch_output(struct perf_event *event, void *data) in perf_event_switch_output() argument
9201 if (!perf_event_switch_match(event)) in perf_event_switch_output()
9205 if (event->ctx->task) { in perf_event_switch_output()
9212 perf_event_pid(event, se->next_prev); in perf_event_switch_output()
9214 perf_event_tid(event, se->next_prev); in perf_event_switch_output()
9217 perf_event_header__init_id(&se->event_id.header, &sample, event); in perf_event_switch_output()
9219 ret = perf_output_begin(&handle, &sample, event, se->event_id.header.size); in perf_event_switch_output()
9223 if (event->ctx->task) in perf_event_switch_output()
9228 perf_event__output_id_sample(event, &handle, &sample); in perf_event_switch_output()
9266 static void perf_log_throttle(struct perf_event *event, int enable) in perf_log_throttle() argument
9283 .time = perf_event_clock(event), in perf_log_throttle()
9284 .id = primary_event_id(event), in perf_log_throttle()
9285 .stream_id = event->id, in perf_log_throttle()
9291 perf_event_header__init_id(&throttle_event.header, &sample, event); in perf_log_throttle()
9293 ret = perf_output_begin(&handle, &sample, event, in perf_log_throttle()
9299 perf_event__output_id_sample(event, &handle, &sample); in perf_log_throttle()
9319 static int perf_event_ksymbol_match(struct perf_event *event) in perf_event_ksymbol_match() argument
9321 return event->attr.ksymbol; in perf_event_ksymbol_match()
9324 static void perf_event_ksymbol_output(struct perf_event *event, void *data) in perf_event_ksymbol_output() argument
9331 if (!perf_event_ksymbol_match(event)) in perf_event_ksymbol_output()
9335 &sample, event); in perf_event_ksymbol_output()
9336 ret = perf_output_begin(&handle, &sample, event, in perf_event_ksymbol_output()
9343 perf_event__output_id_sample(event, &handle, &sample); in perf_event_ksymbol_output()
9409 static int perf_event_bpf_match(struct perf_event *event) in perf_event_bpf_match() argument
9411 return event->attr.bpf_event; in perf_event_bpf_match()
9414 static void perf_event_bpf_output(struct perf_event *event, void *data) in perf_event_bpf_output() argument
9421 if (!perf_event_bpf_match(event)) in perf_event_bpf_output()
9425 &sample, event); in perf_event_bpf_output()
9426 ret = perf_output_begin(&handle, &sample, event, in perf_event_bpf_output()
9432 perf_event__output_id_sample(event, &handle, &sample); in perf_event_bpf_output()
9511 static int perf_event_text_poke_match(struct perf_event *event) in perf_event_text_poke_match() argument
9513 return event->attr.text_poke; in perf_event_text_poke_match()
9516 static void perf_event_text_poke_output(struct perf_event *event, void *data) in perf_event_text_poke_output() argument
9524 if (!perf_event_text_poke_match(event)) in perf_event_text_poke_output()
9527 perf_event_header__init_id(&text_poke_event->event_id.header, &sample, event); in perf_event_text_poke_output()
9529 ret = perf_output_begin(&handle, &sample, event, in perf_event_text_poke_output()
9544 perf_event__output_id_sample(event, &handle, &sample); in perf_event_text_poke_output()
9581 void perf_event_itrace_started(struct perf_event *event) in perf_event_itrace_started() argument
9583 event->attach_state |= PERF_ATTACH_ITRACE; in perf_event_itrace_started()
9586 static void perf_log_itrace_start(struct perf_event *event) in perf_log_itrace_start() argument
9597 if (event->parent) in perf_log_itrace_start()
9598 event = event->parent; in perf_log_itrace_start()
9600 if (!(event->pmu->capabilities & PERF_PMU_CAP_ITRACE) || in perf_log_itrace_start()
9601 event->attach_state & PERF_ATTACH_ITRACE) in perf_log_itrace_start()
9607 rec.pid = perf_event_pid(event, current); in perf_log_itrace_start()
9608 rec.tid = perf_event_tid(event, current); in perf_log_itrace_start()
9610 perf_event_header__init_id(&rec.header, &sample, event); in perf_log_itrace_start()
9611 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_log_itrace_start()
9617 perf_event__output_id_sample(event, &handle, &sample); in perf_log_itrace_start()
9622 void perf_report_aux_output_id(struct perf_event *event, u64 hw_id) in perf_report_aux_output_id() argument
9632 if (event->parent) in perf_report_aux_output_id()
9633 event = event->parent; in perf_report_aux_output_id()
9640 perf_event_header__init_id(&rec.header, &sample, event); in perf_report_aux_output_id()
9641 ret = perf_output_begin(&handle, &sample, event, rec.header.size); in perf_report_aux_output_id()
9647 perf_event__output_id_sample(event, &handle, &sample); in perf_report_aux_output_id()
9654 __perf_event_account_interrupt(struct perf_event *event, int throttle) in __perf_event_account_interrupt() argument
9656 struct hw_perf_event *hwc = &event->hw; in __perf_event_account_interrupt()
9671 perf_log_throttle(event, 0); in __perf_event_account_interrupt()
9676 if (event->attr.freq) { in __perf_event_account_interrupt()
9683 perf_adjust_period(event, delta, hwc->last_period, true); in __perf_event_account_interrupt()
9689 int perf_event_account_interrupt(struct perf_event *event) in perf_event_account_interrupt() argument
9691 return __perf_event_account_interrupt(event, 1); in perf_event_account_interrupt()
9694 static inline bool sample_is_allowed(struct perf_event *event, struct pt_regs *regs) in sample_is_allowed() argument
9701 if (event->attr.exclude_kernel && !user_mode(regs)) in sample_is_allowed()
9708 static int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9714 .event = event, in bpf_overflow_handler()
9723 prog = READ_ONCE(event->prog); in bpf_overflow_handler()
9725 perf_prepare_sample(data, event, regs); in bpf_overflow_handler()
9735 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9739 if (event->overflow_handler_context) in perf_event_set_bpf_handler()
9743 if (event->prog) in perf_event_set_bpf_handler()
9749 if (event->attr.precise_ip && in perf_event_set_bpf_handler()
9751 (!(event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) || in perf_event_set_bpf_handler()
9752 event->attr.exclude_callchain_kernel || in perf_event_set_bpf_handler()
9753 event->attr.exclude_callchain_user)) { in perf_event_set_bpf_handler()
9766 event->prog = prog; in perf_event_set_bpf_handler()
9767 event->bpf_cookie = bpf_cookie; in perf_event_set_bpf_handler()
9771 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9773 struct bpf_prog *prog = event->prog; in perf_event_free_bpf_handler()
9778 event->prog = NULL; in perf_event_free_bpf_handler()
9782 static inline int bpf_overflow_handler(struct perf_event *event, in bpf_overflow_handler() argument
9789 static inline int perf_event_set_bpf_handler(struct perf_event *event, in perf_event_set_bpf_handler() argument
9796 static inline void perf_event_free_bpf_handler(struct perf_event *event) in perf_event_free_bpf_handler() argument
9802 * Generic event overflow handling, sampling.
9805 static int __perf_event_overflow(struct perf_event *event, in __perf_event_overflow() argument
9809 int events = atomic_read(&event->event_limit); in __perf_event_overflow()
9816 if (unlikely(!is_sampling_event(event))) in __perf_event_overflow()
9819 ret = __perf_event_account_interrupt(event, throttle); in __perf_event_overflow()
9821 if (event->prog && event->prog->type == BPF_PROG_TYPE_PERF_EVENT && in __perf_event_overflow()
9822 !bpf_overflow_handler(event, data, regs)) in __perf_event_overflow()
9830 event->pending_kill = POLL_IN; in __perf_event_overflow()
9831 if (events && atomic_dec_and_test(&event->event_limit)) { in __perf_event_overflow()
9833 event->pending_kill = POLL_HUP; in __perf_event_overflow()
9834 perf_event_disable_inatomic(event); in __perf_event_overflow()
9837 if (event->attr.sigtrap) { in __perf_event_overflow()
9841 * it is the first event, on the other hand, we should also not in __perf_event_overflow()
9844 bool valid_sample = sample_is_allowed(event, regs); in __perf_event_overflow()
9853 if (!event->pending_work && in __perf_event_overflow()
9854 !task_work_add(current, &event->pending_task, notify_mode)) { in __perf_event_overflow()
9855 event->pending_work = pending_id; in __perf_event_overflow()
9856 local_inc(&event->ctx->nr_no_switch_fast); in __perf_event_overflow()
9858 event->pending_addr = 0; in __perf_event_overflow()
9860 event->pending_addr = data->addr; in __perf_event_overflow()
9862 } else if (event->attr.exclude_kernel && valid_sample) { in __perf_event_overflow()
9875 WARN_ON_ONCE(event->pending_work != pending_id); in __perf_event_overflow()
9879 READ_ONCE(event->overflow_handler)(event, data, regs); in __perf_event_overflow() local
9881 if (*perf_event_fasync(event) && event->pending_kill) { in __perf_event_overflow()
9882 event->pending_wakeup = 1; in __perf_event_overflow()
9883 irq_work_queue(&event->pending_irq); in __perf_event_overflow()
9889 int perf_event_overflow(struct perf_event *event, in perf_event_overflow() argument
9893 return __perf_event_overflow(event, 1, data, regs); in perf_event_overflow()
9897 * Generic software event infrastructure
9908 * We directly increment event->count and keep a second value in
9909 * event->hw.period_left to count intervals. This period event
9914 u64 perf_swevent_set_period(struct perf_event *event) in perf_swevent_set_period() argument
9916 struct hw_perf_event *hwc = &event->hw; in perf_swevent_set_period()
9937 static void perf_swevent_overflow(struct perf_event *event, u64 overflow, in perf_swevent_overflow() argument
9941 struct hw_perf_event *hwc = &event->hw; in perf_swevent_overflow()
9945 overflow = perf_swevent_set_period(event); in perf_swevent_overflow()
9951 if (__perf_event_overflow(event, throttle, in perf_swevent_overflow()
9963 static void perf_swevent_event(struct perf_event *event, u64 nr, in perf_swevent_event() argument
9967 struct hw_perf_event *hwc = &event->hw; in perf_swevent_event()
9969 local64_add(nr, &event->count); in perf_swevent_event()
9974 if (!is_sampling_event(event)) in perf_swevent_event()
9977 if ((event->attr.sample_type & PERF_SAMPLE_PERIOD) && !event->attr.freq) { in perf_swevent_event()
9979 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9981 data->period = event->hw.last_period; in perf_swevent_event()
9983 if (nr == 1 && hwc->sample_period == 1 && !event->attr.freq) in perf_swevent_event()
9984 return perf_swevent_overflow(event, 1, data, regs); in perf_swevent_event()
9989 perf_swevent_overflow(event, 0, data, regs); in perf_swevent_event()
9992 static int perf_exclude_event(struct perf_event *event, in perf_exclude_event() argument
9995 if (event->hw.state & PERF_HES_STOPPED) in perf_exclude_event()
9999 if (event->attr.exclude_user && user_mode(regs)) in perf_exclude_event()
10002 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_exclude_event()
10009 static int perf_swevent_match(struct perf_event *event, in perf_swevent_match() argument
10015 if (event->attr.type != type) in perf_swevent_match()
10018 if (event->attr.config != event_id) in perf_swevent_match()
10021 if (perf_exclude_event(event, regs)) in perf_swevent_match()
10055 /* For the event head insertion and removal in the hlist */
10057 find_swevent_head(struct swevent_htable *swhash, struct perf_event *event) in find_swevent_head() argument
10060 u32 event_id = event->attr.config; in find_swevent_head()
10061 u64 type = event->attr.type; in find_swevent_head()
10064 * Event scheduling is always serialized against hlist allocation in find_swevent_head()
10069 lockdep_is_held(&event->ctx->lock)); in find_swevent_head()
10082 struct perf_event *event; in do_perf_sw_event() local
10090 hlist_for_each_entry_rcu(event, head, hlist_entry) { in do_perf_sw_event()
10091 if (perf_swevent_match(event, type, event_id, data, regs)) in do_perf_sw_event()
10092 perf_swevent_event(event, nr, data, regs); in do_perf_sw_event()
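
Software events reach do_perf_sw_event() through helpers such as perf_sw_event(); a hedged sketch of how kernel code typically emits one (the call site is illustrative, modelled on the page-fault accounting path):

#include <linux/perf_event.h>
#include <linux/ptrace.h>

static void account_fault(struct pt_regs *regs, unsigned long address)
{
	/* nr = 1: one occurrence; regs/address end up in the sample data. */
	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
}
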
10138 static void perf_swevent_read(struct perf_event *event) in perf_swevent_read() argument
10142 static int perf_swevent_add(struct perf_event *event, int flags) in perf_swevent_add() argument
10145 struct hw_perf_event *hwc = &event->hw; in perf_swevent_add()
10148 if (is_sampling_event(event)) { in perf_swevent_add()
10150 perf_swevent_set_period(event); in perf_swevent_add()
10155 head = find_swevent_head(swhash, event); in perf_swevent_add()
10159 hlist_add_head_rcu(&event->hlist_entry, head); in perf_swevent_add()
10160 perf_event_update_userpage(event); in perf_swevent_add()
10165 static void perf_swevent_del(struct perf_event *event, int flags) in perf_swevent_del() argument
10167 hlist_del_rcu(&event->hlist_entry); in perf_swevent_del()
10170 static void perf_swevent_start(struct perf_event *event, int flags) in perf_swevent_start() argument
10172 event->hw.state = 0; in perf_swevent_start()
10175 static void perf_swevent_stop(struct perf_event *event, int flags) in perf_swevent_stop() argument
10177 event->hw.state = PERF_HES_STOPPED; in perf_swevent_stop()
10269 static void sw_perf_event_destroy(struct perf_event *event) in sw_perf_event_destroy() argument
10271 u64 event_id = event->attr.config; in sw_perf_event_destroy()
10273 WARN_ON(event->parent); in sw_perf_event_destroy()
10282 static int perf_swevent_init(struct perf_event *event) in perf_swevent_init() argument
10284 u64 event_id = event->attr.config; in perf_swevent_init()
10286 if (event->attr.type != PERF_TYPE_SOFTWARE) in perf_swevent_init()
10292 if (has_branch_stack(event)) in perf_swevent_init()
10297 event->attr.type = perf_cpu_clock.type; in perf_swevent_init()
10300 event->attr.type = perf_task_clock.type; in perf_swevent_init()
10310 if (!event->parent) { in perf_swevent_init()
10318 event->destroy = sw_perf_event_destroy; in perf_swevent_init()
10339 static void tp_perf_event_destroy(struct perf_event *event) in tp_perf_event_destroy() argument
10341 perf_trace_destroy(event); in tp_perf_event_destroy()
10344 static int perf_tp_event_init(struct perf_event *event) in perf_tp_event_init() argument
10348 if (event->attr.type != PERF_TYPE_TRACEPOINT) in perf_tp_event_init()
10354 if (has_branch_stack(event)) in perf_tp_event_init()
10357 err = perf_trace_init(event); in perf_tp_event_init()
10361 event->destroy = tp_perf_event_destroy; in perf_tp_event_init()
10377 static int perf_tp_filter_match(struct perf_event *event, in perf_tp_filter_match() argument
10383 if (event->parent) in perf_tp_filter_match()
10384 event = event->parent; in perf_tp_filter_match()
10386 if (likely(!event->filter) || filter_match_preds(event->filter, record)) in perf_tp_filter_match()
10391 static int perf_tp_event_match(struct perf_event *event, in perf_tp_event_match() argument
10395 if (event->hw.state & PERF_HES_STOPPED) in perf_tp_event_match()
10400 if (event->attr.exclude_kernel && !user_mode(regs)) in perf_tp_event_match()
10403 if (!perf_tp_filter_match(event, data)) in perf_tp_event_match()
10421 perf_tp_event(call->event.type, count, raw_data, size, regs, head, in perf_trace_run_bpf_submit()
10429 struct perf_event *event) in __perf_tp_event_target_task() argument
10433 if (event->attr.config != entry->type) in __perf_tp_event_target_task()
10436 if (event->attr.sigtrap) in __perf_tp_event_target_task()
10438 if (perf_tp_event_match(event, data, regs)) in __perf_tp_event_target_task()
10439 perf_swevent_event(event, count, data, regs); in __perf_tp_event_target_task()
10449 struct perf_event *event, *sibling; in perf_tp_event_target_task() local
10451 perf_event_groups_for_cpu_pmu(event, &ctx->pinned_groups, cpu, pmu) { in perf_tp_event_target_task()
10452 __perf_tp_event_target_task(count, record, regs, data, event); in perf_tp_event_target_task()
10453 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10457 perf_event_groups_for_cpu_pmu(event, &ctx->flexible_groups, cpu, pmu) { in perf_tp_event_target_task()
10458 __perf_tp_event_target_task(count, record, regs, data, event); in perf_tp_event_target_task()
10459 for_each_sibling_event(sibling, event) in perf_tp_event_target_task()
10469 struct perf_event *event; in perf_tp_event() local
10483 hlist_for_each_entry_rcu(event, head, hlist_entry) { in perf_tp_event()
10484 if (perf_tp_event_match(event, &data, regs)) { in perf_tp_event()
10485 perf_swevent_event(event, count, &data, regs); in perf_tp_event()
10489 * some members in data are event-specific and in perf_tp_event()
10492 * the problem that next event skips preparing data in perf_tp_event()
10502 * deliver this event there too. in perf_tp_event()
10563 static int perf_kprobe_event_init(struct perf_event *event);
10575 static int perf_kprobe_event_init(struct perf_event *event) in perf_kprobe_event_init() argument
10580 if (event->attr.type != perf_kprobe.type) in perf_kprobe_event_init()
10589 if (has_branch_stack(event)) in perf_kprobe_event_init()
10592 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_kprobe_event_init()
10593 err = perf_kprobe_init(event, is_retprobe); in perf_kprobe_event_init()
10597 event->destroy = perf_kprobe_destroy; in perf_kprobe_event_init()
10622 static int perf_uprobe_event_init(struct perf_event *event);
10634 static int perf_uprobe_event_init(struct perf_event *event) in perf_uprobe_event_init() argument
10640 if (event->attr.type != perf_uprobe.type) in perf_uprobe_event_init()
10649 if (has_branch_stack(event)) in perf_uprobe_event_init()
10652 is_retprobe = event->attr.config & PERF_PROBE_CONFIG_IS_RETPROBE; in perf_uprobe_event_init()
10653 ref_ctr_offset = event->attr.config >> PERF_UPROBE_REF_CTR_OFFSET_SHIFT; in perf_uprobe_event_init()
10654 err = perf_uprobe_init(event, ref_ctr_offset, is_retprobe); in perf_uprobe_event_init()
10658 event->destroy = perf_uprobe_destroy; in perf_uprobe_event_init()
10675 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10677 ftrace_profile_free_filter(event); in perf_event_free_filter()
10681 * returns true if the event is a tracepoint, or a kprobe/uprobe created
10684 static inline bool perf_event_is_tracing(struct perf_event *event) in perf_event_is_tracing() argument
10686 if (event->pmu == &perf_tracepoint) in perf_event_is_tracing()
10689 if (event->pmu == &perf_kprobe) in perf_event_is_tracing()
10693 if (event->pmu == &perf_uprobe) in perf_event_is_tracing()
10699 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10704 if (!perf_event_is_tracing(event)) in perf_event_set_bpf_prog()
10705 return perf_event_set_bpf_handler(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
10707 is_kprobe = event->tp_event->flags & TRACE_EVENT_FL_KPROBE; in perf_event_set_bpf_prog()
10708 is_uprobe = event->tp_event->flags & TRACE_EVENT_FL_UPROBE; in perf_event_set_bpf_prog()
10709 is_tracepoint = event->tp_event->flags & TRACE_EVENT_FL_TRACEPOINT; in perf_event_set_bpf_prog()
10710 is_syscall_tp = is_syscall_trace_event(event->tp_event); in perf_event_set_bpf_prog()
10729 int off = trace_event_get_offsets(event->tp_event); in perf_event_set_bpf_prog()
10735 return perf_event_attach_bpf_prog(event, prog, bpf_cookie); in perf_event_set_bpf_prog()
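
perf_event_set_bpf_prog() is what backs the PERF_EVENT_IOC_SET_BPF ioctl (as well as the bpf(2) link path); a hedged user-space sketch, assuming prog_fd refers to an already-loaded program of a compatible type:

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int attach_bpf(int perf_fd, int prog_fd)
{
	/* Kernel side: _perf_ioctl() -> perf_event_set_bpf_prog(). */
	if (ioctl(perf_fd, PERF_EVENT_IOC_SET_BPF, prog_fd))
		return -1;
	/* The program stays attached until the event is torn down via
	 * perf_event_free_bpf_prog(). */
	return 0;
}
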
10738 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10740 if (!perf_event_is_tracing(event)) { in perf_event_free_bpf_prog()
10741 perf_event_free_bpf_handler(event); in perf_event_free_bpf_prog()
10744 perf_event_detach_bpf_prog(event); in perf_event_free_bpf_prog()
10753 static void perf_event_free_filter(struct perf_event *event) in perf_event_free_filter() argument
10757 int perf_event_set_bpf_prog(struct perf_event *event, struct bpf_prog *prog, in perf_event_set_bpf_prog() argument
10763 void perf_event_free_bpf_prog(struct perf_event *event) in perf_event_free_bpf_prog() argument
10785 perf_addr_filter_new(struct perf_event *event, struct list_head *filters) in perf_addr_filter_new() argument
10787 int node = cpu_to_node(event->cpu == -1 ? 0 : event->cpu); in perf_addr_filter_new()
10814 static void perf_addr_filters_splice(struct perf_event *event, in perf_addr_filters_splice() argument
10820 if (!has_addr_filter(event)) in perf_addr_filters_splice()
10824 if (event->parent) in perf_addr_filters_splice()
10827 raw_spin_lock_irqsave(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10829 list_splice_init(&event->addr_filters.list, &list); in perf_addr_filters_splice()
10831 list_splice(head, &event->addr_filters.list); in perf_addr_filters_splice()
10833 raw_spin_unlock_irqrestore(&event->addr_filters.lock, flags); in perf_addr_filters_splice()
10860 * Update event's address range filters based on the
10863 static void perf_event_addr_filters_apply(struct perf_event *event) in perf_event_addr_filters_apply() argument
10865 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_addr_filters_apply()
10866 struct task_struct *task = READ_ONCE(event->ctx->task); in perf_event_addr_filters_apply()
10873 * We may observe TASK_TOMBSTONE, which means that the event tear-down in perf_event_addr_filters_apply()
10894 event->addr_filter_ranges[count].start = 0; in perf_event_addr_filters_apply()
10895 event->addr_filter_ranges[count].size = 0; in perf_event_addr_filters_apply()
10897 perf_addr_filter_apply(filter, mm, &event->addr_filter_ranges[count]); in perf_event_addr_filters_apply()
10899 event->addr_filter_ranges[count].start = filter->offset; in perf_event_addr_filters_apply()
10900 event->addr_filter_ranges[count].size = filter->size; in perf_event_addr_filters_apply()
10906 event->addr_filters_gen++; in perf_event_addr_filters_apply()
10916 perf_event_stop(event, 1); in perf_event_addr_filters_apply()
10970 perf_event_parse_addr_filter(struct perf_event *event, char *fstr, in perf_event_parse_addr_filter() argument
10997 filter = perf_addr_filter_new(event, filters); in perf_event_parse_addr_filter()
11056 * Make sure that it doesn't contradict itself or the event's in perf_event_parse_addr_filter()
11083 if (!event->ctx->task) in perf_event_parse_addr_filter()
11098 event->addr_filters.nr_file_filters++; in perf_event_parse_addr_filter()
11127 perf_event_set_addr_filter(struct perf_event *event, char *filter_str) in perf_event_set_addr_filter() argument
11136 lockdep_assert_held(&event->ctx->mutex); in perf_event_set_addr_filter()
11138 if (WARN_ON_ONCE(event->parent)) in perf_event_set_addr_filter()
11141 ret = perf_event_parse_addr_filter(event, filter_str, &filters); in perf_event_set_addr_filter()
11145 ret = event->pmu->addr_filters_validate(&filters); in perf_event_set_addr_filter()
11150 perf_addr_filters_splice(event, &filters); in perf_event_set_addr_filter()
11153 perf_event_for_each_child(event, perf_event_addr_filters_apply); in perf_event_set_addr_filter()
11161 event->addr_filters.nr_file_filters = 0; in perf_event_set_addr_filter()
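For reference, the filter strings consumed by perf_event_parse_addr_filter() take the form "ACTION RANGE[@object]"; the actions are "filter", "start" and "stop". The paths and addresses below are made up:

	filter 0x2000/0x1000@/usr/bin/myapp	limit tracing to this range of the object
	stop 0x3000@/usr/bin/myapp		stop tracing at this object offset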
11166 static int perf_event_set_filter(struct perf_event *event, void __user *arg) in perf_event_set_filter() argument
11176 if (perf_event_is_tracing(event)) { in perf_event_set_filter()
11177 struct perf_event_context *ctx = event->ctx; in perf_event_set_filter()
11187 * This can result in the event getting moved to a different ctx, in perf_event_set_filter()
11191 ret = ftrace_profile_set_filter(event, event->attr.config, filter_str); in perf_event_set_filter()
11195 if (has_addr_filter(event)) in perf_event_set_filter()
11196 ret = perf_event_set_addr_filter(event, filter_str); in perf_event_set_filter()
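A hedged userspace sketch of the path into perf_event_set_filter() above: the PERF_EVENT_IOC_SET_FILTER ioctl with an ftrace-style filter expression. tp_fd is an assumed fd from perf_event_open() on a tracepoint event.

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* Only record samples from non-idle tasks on this tracepoint event. */
static int set_tp_filter(int tp_fd)
{
	return ioctl(tp_fd, PERF_EVENT_IOC_SET_FILTER, "common_pid != 0");
}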
11211 struct perf_event *event; in perf_swevent_hrtimer() local
11214 event = container_of(hrtimer, struct perf_event, hw.hrtimer); in perf_swevent_hrtimer()
11216 if (event->state != PERF_EVENT_STATE_ACTIVE) in perf_swevent_hrtimer()
11219 event->pmu->read(event); in perf_swevent_hrtimer()
11221 perf_sample_data_init(&data, 0, event->hw.last_period); in perf_swevent_hrtimer()
11224 if (regs && !perf_exclude_event(event, regs)) { in perf_swevent_hrtimer()
11225 if (!(event->attr.exclude_idle && is_idle_task(current))) in perf_swevent_hrtimer()
11226 if (__perf_event_overflow(event, 1, &data, regs)) in perf_swevent_hrtimer()
11230 period = max_t(u64, 10000, event->hw.sample_period); in perf_swevent_hrtimer()
11236 static void perf_swevent_start_hrtimer(struct perf_event *event) in perf_swevent_start_hrtimer() argument
11238 struct hw_perf_event *hwc = &event->hw; in perf_swevent_start_hrtimer()
11241 if (!is_sampling_event(event)) in perf_swevent_start_hrtimer()
11257 static void perf_swevent_cancel_hrtimer(struct perf_event *event) in perf_swevent_cancel_hrtimer() argument
11259 struct hw_perf_event *hwc = &event->hw; in perf_swevent_cancel_hrtimer()
11261 if (is_sampling_event(event)) { in perf_swevent_cancel_hrtimer()
11269 static void perf_swevent_init_hrtimer(struct perf_event *event) in perf_swevent_init_hrtimer() argument
11271 struct hw_perf_event *hwc = &event->hw; in perf_swevent_init_hrtimer()
11273 if (!is_sampling_event(event)) in perf_swevent_init_hrtimer()
11283 if (event->attr.freq) { in perf_swevent_init_hrtimer()
11284 long freq = event->attr.sample_freq; in perf_swevent_init_hrtimer()
11286 event->attr.sample_period = NSEC_PER_SEC / freq; in perf_swevent_init_hrtimer()
11287 hwc->sample_period = event->attr.sample_period; in perf_swevent_init_hrtimer()
11290 event->attr.freq = 0; in perf_swevent_init_hrtimer()
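A worked example of the freq-to-period conversion above, with an illustrative frequency:

	/* attr.freq = 1, attr.sample_freq = 4000 (Hz) */
	u64 period = NSEC_PER_SEC / 4000;	/* 250000 ns between hrtimer samples */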
11295 * Software event: cpu wall time clock
11298 static void cpu_clock_event_update(struct perf_event *event) in cpu_clock_event_update() argument
11304 prev = local64_xchg(&event->hw.prev_count, now); in cpu_clock_event_update()
11305 local64_add(now - prev, &event->count); in cpu_clock_event_update()
11308 static void cpu_clock_event_start(struct perf_event *event, int flags) in cpu_clock_event_start() argument
11310 local64_set(&event->hw.prev_count, local_clock()); in cpu_clock_event_start()
11311 perf_swevent_start_hrtimer(event); in cpu_clock_event_start()
11314 static void cpu_clock_event_stop(struct perf_event *event, int flags) in cpu_clock_event_stop() argument
11316 perf_swevent_cancel_hrtimer(event); in cpu_clock_event_stop()
11317 cpu_clock_event_update(event); in cpu_clock_event_stop()
11320 static int cpu_clock_event_add(struct perf_event *event, int flags) in cpu_clock_event_add() argument
11323 cpu_clock_event_start(event, flags); in cpu_clock_event_add()
11324 perf_event_update_userpage(event); in cpu_clock_event_add()
11329 static void cpu_clock_event_del(struct perf_event *event, int flags) in cpu_clock_event_del() argument
11331 cpu_clock_event_stop(event, flags); in cpu_clock_event_del()
11334 static void cpu_clock_event_read(struct perf_event *event) in cpu_clock_event_read() argument
11336 cpu_clock_event_update(event); in cpu_clock_event_read()
11339 static int cpu_clock_event_init(struct perf_event *event) in cpu_clock_event_init() argument
11341 if (event->attr.type != perf_cpu_clock.type) in cpu_clock_event_init()
11344 if (event->attr.config != PERF_COUNT_SW_CPU_CLOCK) in cpu_clock_event_init()
11350 if (has_branch_stack(event)) in cpu_clock_event_init()
11353 perf_swevent_init_hrtimer(event); in cpu_clock_event_init()
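A hedged userspace sketch of opening the cpu-clock event that cpu_clock_event_init() above accepts; the software type/config pair is the stable ABI and is presumably forwarded to the perf_cpu_clock pmu by the core (see the type-forwarding comment in perf_init_event() further down).

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Per-thread cpu-clock counter: pid = 0 (self), cpu = -1 (any). */
static int open_cpu_clock(void)
{
	struct perf_event_attr attr = {
		.type   = PERF_TYPE_SOFTWARE,
		.size   = sizeof(attr),
		.config = PERF_COUNT_SW_CPU_CLOCK,
	};

	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}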
11373 * Software event: task time clock
11376 static void task_clock_event_update(struct perf_event *event, u64 now) in task_clock_event_update() argument
11381 prev = local64_xchg(&event->hw.prev_count, now); in task_clock_event_update()
11383 local64_add(delta, &event->count); in task_clock_event_update()
11386 static void task_clock_event_start(struct perf_event *event, int flags) in task_clock_event_start() argument
11388 local64_set(&event->hw.prev_count, event->ctx->time); in task_clock_event_start()
11389 perf_swevent_start_hrtimer(event); in task_clock_event_start()
11392 static void task_clock_event_stop(struct perf_event *event, int flags) in task_clock_event_stop() argument
11394 perf_swevent_cancel_hrtimer(event); in task_clock_event_stop()
11395 task_clock_event_update(event, event->ctx->time); in task_clock_event_stop()
11398 static int task_clock_event_add(struct perf_event *event, int flags) in task_clock_event_add() argument
11401 task_clock_event_start(event, flags); in task_clock_event_add()
11402 perf_event_update_userpage(event); in task_clock_event_add()
11407 static void task_clock_event_del(struct perf_event *event, int flags) in task_clock_event_del() argument
11409 task_clock_event_stop(event, PERF_EF_UPDATE); in task_clock_event_del()
11412 static void task_clock_event_read(struct perf_event *event) in task_clock_event_read() argument
11415 u64 delta = now - event->ctx->timestamp; in task_clock_event_read()
11416 u64 time = event->ctx->time + delta; in task_clock_event_read()
11418 task_clock_event_update(event, time); in task_clock_event_read()
11421 static int task_clock_event_init(struct perf_event *event) in task_clock_event_init() argument
11423 if (event->attr.type != perf_task_clock.type) in task_clock_event_init()
11426 if (event->attr.config != PERF_COUNT_SW_TASK_CLOCK) in task_clock_event_init()
11432 if (has_branch_stack(event)) in task_clock_event_init()
11435 perf_swevent_init_hrtimer(event); in task_clock_event_init()
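A hedged sketch of consuming such a clock counter from userspace, assuming fd comes from perf_event_open() with the default read_format: a plain read() returns the u64 nanosecond count maintained by task_clock_event_update() above.

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static void report_task_clock(int fd)
{
	uint64_t ns;	/* accumulated task time in nanoseconds */

	if (read(fd, &ns, sizeof(ns)) == sizeof(ns))
		printf("task clock: %llu ns\n", (unsigned long long)ns);
}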
11467 static int perf_event_nop_int(struct perf_event *event, u64 value) in perf_event_nop_int() argument
11509 static int perf_event_idx_default(struct perf_event *event) in perf_event_idx_default() argument
11860 static inline bool has_extended_regs(struct perf_event *event) in has_extended_regs() argument
11862 return (event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK) || in has_extended_regs()
11863 (event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK); in has_extended_regs()
11866 static int perf_try_init_event(struct pmu *pmu, struct perf_event *event) in perf_try_init_event() argument
11877 * if this is a sibling event, acquire the ctx->mutex to protect in perf_try_init_event()
11880 if (event->group_leader != event && pmu->task_ctx_nr != perf_sw_context) { in perf_try_init_event()
11885 ctx = perf_event_ctx_lock_nested(event->group_leader, in perf_try_init_event()
11890 event->pmu = pmu; in perf_try_init_event()
11891 ret = pmu->event_init(event); in perf_try_init_event()
11894 perf_event_ctx_unlock(event->group_leader, ctx); in perf_try_init_event()
11898 has_extended_regs(event)) in perf_try_init_event()
11902 event_has_any_exclude_flag(event)) in perf_try_init_event()
11905 if (pmu->scope != PERF_PMU_SCOPE_NONE && event->cpu >= 0) { in perf_try_init_event()
11906 const struct cpumask *cpumask = perf_scope_cpu_topology_cpumask(pmu->scope, event->cpu); in perf_try_init_event()
11915 event->event_caps |= PERF_EV_CAP_READ_SCOPE; in perf_try_init_event()
11921 if (ret && event->destroy) in perf_try_init_event()
11922 event->destroy(event); in perf_try_init_event()
11931 static struct pmu *perf_init_event(struct perf_event *event) in perf_init_event() argument
11941 * pmus overwrite event->attr.type to forward the event to another pmu. in perf_init_event()
11943 event->orig_type = event->attr.type; in perf_init_event()
11946 if (event->parent && event->parent->pmu) { in perf_init_event()
11947 pmu = event->parent->pmu; in perf_init_event()
11948 ret = perf_try_init_event(pmu, event); in perf_init_event()
11957 type = event->attr.type; in perf_init_event()
11959 type = event->attr.config >> PERF_PMU_TYPE_SHIFT; in perf_init_event()
11964 event->attr.config &= PERF_HW_EVENT_MASK; in perf_init_event()
11973 if (event->attr.type != type && type != PERF_TYPE_RAW && in perf_init_event()
11977 ret = perf_try_init_event(pmu, event); in perf_init_event()
11978 if (ret == -ENOENT && event->attr.type != type && !extended_type) { in perf_init_event()
11979 type = event->attr.type; in perf_init_event()
11990 ret = perf_try_init_event(pmu, event); in perf_init_event()
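A worked example of the extended-type decoding in perf_init_event() above; the PMU type value 8 is made up. On hybrid systems the target PMU may be encoded in the upper bits of attr.config:

	/* userspace side: encode pmu type 8 next to a hardware event id */
	__u64 config = ((__u64)8 << PERF_PMU_TYPE_SHIFT) | PERF_COUNT_HW_INSTRUCTIONS;

	/* kernel side, per the lines above:
	 *   type   = config >> PERF_PMU_TYPE_SHIFT;	-> 8
	 *   config &= PERF_HW_EVENT_MASK;		-> PERF_COUNT_HW_INSTRUCTIONS
	 */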
12007 static void attach_sb_event(struct perf_event *event) in attach_sb_event() argument
12009 struct pmu_event_list *pel = per_cpu_ptr(&pmu_sb_events, event->cpu); in attach_sb_event()
12012 list_add_rcu(&event->sb_list, &pel->list); in attach_sb_event()
12023 static void account_pmu_sb_event(struct perf_event *event) in account_pmu_sb_event() argument
12025 if (is_sb_event(event)) in account_pmu_sb_event()
12026 attach_sb_event(event); in account_pmu_sb_event()
12050 static void account_event(struct perf_event *event) in account_event() argument
12054 if (event->parent) in account_event()
12057 if (event->attach_state & (PERF_ATTACH_TASK | PERF_ATTACH_SCHED_CB)) in account_event()
12059 if (event->attr.mmap || event->attr.mmap_data) in account_event()
12061 if (event->attr.build_id) in account_event()
12063 if (event->attr.comm) in account_event()
12065 if (event->attr.namespaces) in account_event()
12067 if (event->attr.cgroup) in account_event()
12069 if (event->attr.task) in account_event()
12071 if (event->attr.freq) in account_event()
12073 if (event->attr.context_switch) { in account_event()
12077 if (has_branch_stack(event)) in account_event()
12079 if (is_cgroup_event(event)) in account_event()
12081 if (event->attr.ksymbol) in account_event()
12083 if (event->attr.bpf_event) in account_event()
12085 if (event->attr.text_poke) in account_event()
12116 account_pmu_sb_event(event); in account_event()
12120 * Allocate and initialize an event structure
12131 struct perf_event *event; in perf_event_alloc() local
12146 event = kmem_cache_alloc_node(perf_event_cache, GFP_KERNEL | __GFP_ZERO, in perf_event_alloc()
12148 if (!event) in perf_event_alloc()
12156 group_leader = event; in perf_event_alloc()
12158 mutex_init(&event->child_mutex); in perf_event_alloc()
12159 INIT_LIST_HEAD(&event->child_list); in perf_event_alloc()
12161 INIT_LIST_HEAD(&event->event_entry); in perf_event_alloc()
12162 INIT_LIST_HEAD(&event->sibling_list); in perf_event_alloc()
12163 INIT_LIST_HEAD(&event->active_list); in perf_event_alloc()
12164 init_event_group(event); in perf_event_alloc()
12165 INIT_LIST_HEAD(&event->rb_entry); in perf_event_alloc()
12166 INIT_LIST_HEAD(&event->active_entry); in perf_event_alloc()
12167 INIT_LIST_HEAD(&event->addr_filters.list); in perf_event_alloc()
12168 INIT_HLIST_NODE(&event->hlist_entry); in perf_event_alloc()
12171 init_waitqueue_head(&event->waitq); in perf_event_alloc()
12172 init_irq_work(&event->pending_irq, perf_pending_irq); in perf_event_alloc()
12173 event->pending_disable_irq = IRQ_WORK_INIT_HARD(perf_pending_disable); in perf_event_alloc()
12174 init_task_work(&event->pending_task, perf_pending_task); in perf_event_alloc()
12175 rcuwait_init(&event->pending_work_wait); in perf_event_alloc()
12177 mutex_init(&event->mmap_mutex); in perf_event_alloc()
12178 raw_spin_lock_init(&event->addr_filters.lock); in perf_event_alloc()
12180 atomic_long_set(&event->refcount, 1); in perf_event_alloc()
12181 event->cpu = cpu; in perf_event_alloc()
12182 event->attr = *attr; in perf_event_alloc()
12183 event->group_leader = group_leader; in perf_event_alloc()
12184 event->pmu = NULL; in perf_event_alloc()
12185 event->oncpu = -1; in perf_event_alloc()
12187 event->parent = parent_event; in perf_event_alloc()
12189 event->ns = get_pid_ns(task_active_pid_ns(current)); in perf_event_alloc()
12190 event->id = atomic64_inc_return(&perf_event_id); in perf_event_alloc()
12192 event->state = PERF_EVENT_STATE_INACTIVE; in perf_event_alloc()
12195 event->event_caps = parent_event->event_caps; in perf_event_alloc()
12198 event->attach_state = PERF_ATTACH_TASK; in perf_event_alloc()
12204 event->hw.target = get_task_struct(task); in perf_event_alloc()
12207 event->clock = &local_clock; in perf_event_alloc()
12209 event->clock = parent_event->clock; in perf_event_alloc()
12219 event->prog = prog; in perf_event_alloc()
12225 event->overflow_handler = overflow_handler; in perf_event_alloc()
12226 event->overflow_handler_context = context; in perf_event_alloc()
12227 } else if (is_write_backward(event)) { in perf_event_alloc()
12228 event->overflow_handler = perf_event_output_backward; in perf_event_alloc()
12229 event->overflow_handler_context = NULL; in perf_event_alloc()
12231 event->overflow_handler = perf_event_output_forward; in perf_event_alloc()
12232 event->overflow_handler_context = NULL; in perf_event_alloc()
12235 perf_event__state_init(event); in perf_event_alloc()
12239 hwc = &event->hw; in perf_event_alloc()
12256 if (!has_branch_stack(event)) in perf_event_alloc()
12257 event->attr.branch_sample_type = 0; in perf_event_alloc()
12259 pmu = perf_init_event(event); in perf_event_alloc()
12275 if (event->attr.aux_output && in perf_event_alloc()
12282 err = perf_cgroup_connect(cgroup_fd, event, attr, group_leader); in perf_event_alloc()
12287 err = exclusive_event_init(event); in perf_event_alloc()
12291 if (has_addr_filter(event)) { in perf_event_alloc()
12292 event->addr_filter_ranges = kcalloc(pmu->nr_addr_filters, in perf_event_alloc()
12295 if (!event->addr_filter_ranges) { in perf_event_alloc()
12304 if (event->parent) { in perf_event_alloc()
12305 struct perf_addr_filters_head *ifh = perf_event_addr_filters(event); in perf_event_alloc()
12308 memcpy(event->addr_filter_ranges, in perf_event_alloc()
12309 event->parent->addr_filter_ranges, in perf_event_alloc()
12315 event->addr_filters_gen = 1; in perf_event_alloc()
12318 if (!event->parent) { in perf_event_alloc()
12319 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) { in perf_event_alloc()
12326 err = security_perf_event_alloc(event); in perf_event_alloc()
12331 account_event(event); in perf_event_alloc()
12333 return event; in perf_event_alloc()
12336 if (!event->parent) { in perf_event_alloc()
12337 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) in perf_event_alloc()
12341 kfree(event->addr_filter_ranges); in perf_event_alloc()
12344 exclusive_event_destroy(event); in perf_event_alloc()
12347 if (is_cgroup_event(event)) in perf_event_alloc()
12348 perf_detach_cgroup(event); in perf_event_alloc()
12349 if (event->destroy) in perf_event_alloc()
12350 event->destroy(event); in perf_event_alloc()
12353 if (event->hw.target) in perf_event_alloc()
12354 put_task_struct(event->hw.target); in perf_event_alloc()
12355 call_rcu(&event->rcu_head, free_event_rcu); in perf_event_alloc()
12496 perf_event_set_output(struct perf_event *event, struct perf_event *output_event) in perf_event_set_output() argument
12502 mutex_lock(&event->mmap_mutex); in perf_event_set_output()
12507 if (event == output_event) in perf_event_set_output()
12513 if (output_event->cpu != event->cpu) in perf_event_set_output()
12519 if (output_event->cpu == -1 && output_event->hw.target != event->hw.target) in perf_event_set_output()
12525 if (output_event->clock != event->clock) in perf_event_set_output()
12532 if (is_write_backward(output_event) != is_write_backward(event)) in perf_event_set_output()
12538 if (has_aux(event) && has_aux(output_event) && in perf_event_set_output()
12539 event->pmu != output_event->pmu) in perf_event_set_output()
12545 * restarts after every removal, it is guaranteed this new event is in perf_event_set_output()
12549 mutex_lock_double(&event->mmap_mutex, &output_event->mmap_mutex); in perf_event_set_output()
12552 if (atomic_read(&event->mmap_count)) in perf_event_set_output()
12568 ring_buffer_attach(event, rb); in perf_event_set_output()
12572 mutex_unlock(&event->mmap_mutex); in perf_event_set_output()
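A hedged userspace sketch of what lands in perf_event_set_output() above: PERF_EVENT_IOC_SET_OUTPUT redirects one event's samples into another event's ring buffer, subject to the checks shown (same cpu, same clock, same write direction, ...). fd_main and fd_aux are illustrative names.

#include <sys/ioctl.h>
#include <linux/perf_event.h>

/* fd_main owns (or will own) the mmap()ed buffer; fd_aux writes into it. */
static int share_ring_buffer(int fd_main, int fd_aux)
{
	return ioctl(fd_aux, PERF_EVENT_IOC_SET_OUTPUT, fd_main);
}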
12580 static int perf_event_set_clock(struct perf_event *event, clockid_t clk_id) in perf_event_set_clock() argument
12586 event->clock = &ktime_get_mono_fast_ns; in perf_event_set_clock()
12591 event->clock = &ktime_get_raw_fast_ns; in perf_event_set_clock()
12596 event->clock = &ktime_get_real_ns; in perf_event_set_clock()
12600 event->clock = &ktime_get_boottime_ns; in perf_event_set_clock()
12604 event->clock = &ktime_get_clocktai_ns; in perf_event_set_clock()
12611 if (!nmi_safe && !(event->pmu->capabilities & PERF_PMU_CAP_NO_NMI)) in perf_event_set_clock()
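A hedged userspace sketch of selecting one of the clocks handled above: set attr.use_clockid together with attr.clockid at open time. Going by the switch above, CLOCK_MONOTONIC_RAW should map to ktime_get_raw_fast_ns.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <time.h>
#include <unistd.h>

static int open_with_raw_clock(void)
{
	struct perf_event_attr attr = {
		.type        = PERF_TYPE_SOFTWARE,
		.size        = sizeof(attr),
		.config      = PERF_COUNT_SW_TASK_CLOCK,
		.use_clockid = 1,
		.clockid     = CLOCK_MONOTONIC_RAW,	/* event timestamps use this clock */
	};

	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}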
12649 * sys_perf_event_open - open a performance event, associate it to a task/cpu
12654 * @group_fd: group leader event fd
12655 * @flags: perf event open flags
12663 struct perf_event *event, *sibling; in SYSCALL_DEFINE5() local
12766 event = perf_event_alloc(&attr, cpu, task, group_leader, NULL, in SYSCALL_DEFINE5()
12768 if (IS_ERR(event)) { in SYSCALL_DEFINE5()
12769 err = PTR_ERR(event); in SYSCALL_DEFINE5()
12773 if (is_sampling_event(event)) { in SYSCALL_DEFINE5()
12774 if (event->pmu->capabilities & PERF_PMU_CAP_NO_INTERRUPT) { in SYSCALL_DEFINE5()
12784 pmu = event->pmu; in SYSCALL_DEFINE5()
12787 err = perf_event_set_clock(event, attr.clockid); in SYSCALL_DEFINE5()
12793 event->event_caps |= PERF_EV_CAP_SOFTWARE; in SYSCALL_DEFINE5()
12802 * perf_install_in_context() call for this new event to in SYSCALL_DEFINE5()
12814 ctx = find_get_context(task, event); in SYSCALL_DEFINE5()
12829 * Check if the @cpu we're creating an event for is online. in SYSCALL_DEFINE5()
12834 struct perf_cpu_context *cpuctx = per_cpu_ptr(&perf_cpu_context, event->cpu); in SYSCALL_DEFINE5()
12853 if (group_leader->clock != event->clock) in SYSCALL_DEFINE5()
12861 if (group_leader->cpu != event->cpu) in SYSCALL_DEFINE5()
12876 if (is_software_event(event) && in SYSCALL_DEFINE5()
12879 * If the event is a sw event, but the group_leader in SYSCALL_DEFINE5()
12890 } else if (!is_software_event(event)) { in SYSCALL_DEFINE5()
12895 * try to add a hardware event, move the whole group to in SYSCALL_DEFINE5()
12911 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in SYSCALL_DEFINE5()
12916 event->pmu_ctx = pmu_ctx; in SYSCALL_DEFINE5()
12919 err = perf_event_set_output(event, output_event); in SYSCALL_DEFINE5()
12924 if (!perf_event_validate_size(event)) { in SYSCALL_DEFINE5()
12929 if (perf_need_aux_event(event) && !perf_get_aux_event(event, group_leader)) { in SYSCALL_DEFINE5()
12936 * because we need to serialize with concurrent event creation. in SYSCALL_DEFINE5()
12938 if (!exclusive_event_installable(event, ctx)) { in SYSCALL_DEFINE5()
12945 event_file = anon_inode_getfile("[perf_event]", &perf_fops, event, f_flags); in SYSCALL_DEFINE5()
12985 * event. What we want here is event in the initial in SYSCALL_DEFINE5()
12997 * perf_install_in_context() which is the point the event is active and in SYSCALL_DEFINE5()
13000 perf_event__header_size(event); in SYSCALL_DEFINE5()
13001 perf_event__id_header_size(event); in SYSCALL_DEFINE5()
13003 event->owner = current; in SYSCALL_DEFINE5()
13005 perf_install_in_context(ctx, event, event->cpu); in SYSCALL_DEFINE5()
13016 list_add_tail(&event->owner_entry, &current->perf_event_list); in SYSCALL_DEFINE5()
13021 * new event on the sibling_list. This ensures destruction in SYSCALL_DEFINE5()
13030 put_pmu_ctx(event->pmu_ctx); in SYSCALL_DEFINE5()
13031 event->pmu_ctx = NULL; /* _free_event() */ in SYSCALL_DEFINE5()
13040 free_event(event); in SYSCALL_DEFINE5()
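A hedged userspace sketch of the syscall implemented above, including the group_fd path that the group-leader checks apply to; the attribute choices are illustrative.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static void count_cycles_and_instructions(void)
{
	struct perf_event_attr cycles = {
		.type     = PERF_TYPE_HARDWARE,
		.size     = sizeof(cycles),
		.config   = PERF_COUNT_HW_CPU_CYCLES,
		.disabled = 1,			/* start the group disabled */
	};
	struct perf_event_attr insns = {
		.type   = PERF_TYPE_HARDWARE,
		.size   = sizeof(insns),
		.config = PERF_COUNT_HW_INSTRUCTIONS,
	};
	int leader, member;

	leader = syscall(SYS_perf_event_open, &cycles, 0, -1, -1, 0);
	member = syscall(SYS_perf_event_open, &insns,  0, -1, leader, 0);

	/* enable the leader and its siblings together */
	ioctl(leader, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);

	/* ... run the workload, read() both fds, close() ... */
	(void)member;
}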
13057 * @overflow_handler: callback to trigger when we hit the event
13068 struct perf_event *event; in perf_event_create_kernel_counter() local
13079 event = perf_event_alloc(attr, cpu, task, NULL, NULL, in perf_event_create_kernel_counter()
13081 if (IS_ERR(event)) { in perf_event_create_kernel_counter()
13082 err = PTR_ERR(event); in perf_event_create_kernel_counter()
13087 event->owner = TASK_TOMBSTONE; in perf_event_create_kernel_counter()
13088 pmu = event->pmu; in perf_event_create_kernel_counter()
13091 event->event_caps |= PERF_EV_CAP_SOFTWARE; in perf_event_create_kernel_counter()
13096 ctx = find_get_context(task, event); in perf_event_create_kernel_counter()
13109 pmu_ctx = find_get_pmu_context(pmu, ctx, event); in perf_event_create_kernel_counter()
13114 event->pmu_ctx = pmu_ctx; in perf_event_create_kernel_counter()
13118 * Check if the @cpu we're creating an event for is online. in perf_event_create_kernel_counter()
13131 if (!exclusive_event_installable(event, ctx)) { in perf_event_create_kernel_counter()
13136 perf_install_in_context(ctx, event, event->cpu); in perf_event_create_kernel_counter()
13140 return event; in perf_event_create_kernel_counter()
13144 event->pmu_ctx = NULL; /* _free_event() */ in perf_event_create_kernel_counter()
13150 free_event(event); in perf_event_create_kernel_counter()
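A hedged in-kernel sketch of using the helper above, roughly the way callers such as the hardlockup watchdog create a kernel-owned counter; the function name and sample period are illustrative.

/* Kernel-side sketch: per-cpu cycles counter with an overflow callback. */
static struct perf_event *create_cycles_counter(int cpu,
						perf_overflow_handler_t cb)
{
	struct perf_event_attr attr = {
		.type          = PERF_TYPE_HARDWARE,
		.size          = sizeof(attr),
		.config        = PERF_COUNT_HW_CPU_CYCLES,
		.sample_period = 1000000,
	};

	/* task == NULL -> per-cpu counter; no callback context needed */
	return perf_event_create_kernel_counter(&attr, cpu, NULL, cb, NULL);
}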
13161 struct perf_event *event, *sibling; in __perf_pmu_remove() local
13163 perf_event_groups_for_cpu_pmu(event, groups, cpu, pmu) { in __perf_pmu_remove()
13164 perf_remove_from_context(event, 0); in __perf_pmu_remove()
13165 put_pmu_ctx(event->pmu_ctx); in __perf_pmu_remove()
13166 list_add(&event->migrate_entry, events); in __perf_pmu_remove()
13168 for_each_sibling_event(sibling, event) { in __perf_pmu_remove()
13178 int cpu, struct perf_event *event) in __perf_pmu_install_event() argument
13181 struct perf_event_context *old_ctx = event->ctx; in __perf_pmu_install_event()
13185 event->cpu = cpu; in __perf_pmu_install_event()
13186 epc = find_get_pmu_context(pmu, ctx, event); in __perf_pmu_install_event()
13187 event->pmu_ctx = epc; in __perf_pmu_install_event()
13189 if (event->state >= PERF_EVENT_STATE_OFF) in __perf_pmu_install_event()
13190 event->state = PERF_EVENT_STATE_INACTIVE; in __perf_pmu_install_event()
13191 perf_install_in_context(ctx, event, cpu); in __perf_pmu_install_event()
13194 * Now that event->ctx is updated and visible, put the old ctx. in __perf_pmu_install_event()
13202 struct perf_event *event, *tmp; in __perf_pmu_install() local
13212 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13213 if (event->group_leader == event) in __perf_pmu_install()
13216 list_del(&event->migrate_entry); in __perf_pmu_install()
13217 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13224 list_for_each_entry_safe(event, tmp, events, migrate_entry) { in __perf_pmu_install()
13225 list_del(&event->migrate_entry); in __perf_pmu_install()
13226 __perf_pmu_install_event(pmu, ctx, cpu, event); in __perf_pmu_install()
13290 perf_event_exit_event(struct perf_event *event, struct perf_event_context *ctx) in perf_event_exit_event() argument
13292 struct perf_event *parent_event = event->parent; in perf_event_exit_event()
13312 perf_remove_from_context(event, detach_flags); in perf_event_exit_event()
13315 if (event->state > PERF_EVENT_STATE_EXIT) in perf_event_exit_event()
13316 perf_event_set_state(event, PERF_EVENT_STATE_EXIT); in perf_event_exit_event()
13328 free_event(event); in perf_event_exit_event()
13336 perf_event_wakeup(event); in perf_event_exit_event()
13401 * When a child task exits, feed back event values to parent events.
13408 struct perf_event *event, *tmp; in perf_event_exit_task() local
13411 list_for_each_entry_safe(event, tmp, &child->perf_event_list, in perf_event_exit_task()
13413 list_del_init(&event->owner_entry); in perf_event_exit_task()
13420 smp_store_release(&event->owner, NULL); in perf_event_exit_task()
13435 static void perf_free_event(struct perf_event *event, in perf_free_event() argument
13438 struct perf_event *parent = event->parent; in perf_free_event()
13444 list_del_init(&event->child_list); in perf_free_event()
13450 perf_group_detach(event); in perf_free_event()
13451 list_del_event(event, ctx); in perf_free_event()
13453 free_event(event); in perf_free_event()
13466 struct perf_event *event, *tmp; in perf_event_free_task() local
13486 list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) in perf_event_free_task()
13487 perf_free_event(event, ctx); in perf_event_free_task()
13500 * _free_event()'s put_task_struct(event->hw.target) will be a in perf_event_free_task()
13536 const struct perf_event_attr *perf_event_attrs(struct perf_event *event) in perf_event_attrs() argument
13538 if (!event) in perf_event_attrs()
13541 return &event->attr; in perf_event_attrs()
13554 * Inherit an event from parent task to child task.
13616 * Make the child state follow the state of the parent event, in inherit_event()
13655 * Link this into the parent event's child list in inherit_event()
13664 * Inherits an event group.
13708 * Creates the child task context and tries to inherit the event-group.
13711 * inherited_all set when we 'fail' to inherit an orphaned event; this is
13719 inherit_task_group(struct perf_event *event, struct task_struct *parent, in inherit_task_group() argument
13727 if (!event->attr.inherit || in inherit_task_group()
13728 (event->attr.inherit_thread && !(clone_flags & CLONE_THREAD)) || in inherit_task_group()
13730 (event->attr.sigtrap && (clone_flags & CLONE_CLEAR_SIGHAND))) { in inherit_task_group()
13750 ret = inherit_group(event, parent, parent_ctx, child, child_ctx); in inherit_task_group()
13764 struct perf_event *event; in perf_event_init_context() local
13798 perf_event_groups_for_each(event, &parent_ctx->pinned_groups) { in perf_event_init_context()
13799 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
13814 perf_event_groups_for_each(event, &parent_ctx->flexible_groups) { in perf_event_init_context()
13815 ret = inherit_task_group(event, parent, parent_ctx, in perf_event_init_context()
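A hedged userspace note on the inherit path walked above: setting attr.inherit at open time is what causes inherit_task_group()/inherit_group() to duplicate the counter into children on fork().

#include <linux/perf_event.h>

/* Children created by fork() get their own inherited copy of this event. */
struct perf_event_attr inherited_attr = {
	.type    = PERF_TYPE_HARDWARE,
	.size    = sizeof(struct perf_event_attr),
	.config  = PERF_COUNT_HW_INSTRUCTIONS,
	.inherit = 1,
};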
13929 struct perf_event *event; in __perf_event_exit_context() local
13933 list_for_each_entry(event, &ctx->event_list, event_entry) in __perf_event_exit_context()
13934 __perf_remove_from_context(event, cpuctx, ctx, (void *)DETACH_GROUP); in __perf_event_exit_context()