Lines Matching +full:de +full:- +full:serialized
4 * Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
12 * For licensing details see kernel-base/COPYING
21 * Kernel-internal data types and definitions:
39 #include <linux/rhashtable-types.h>
100 return frag->pad < sizeof(u64); in perf_raw_frag_last()
108 * -1ULL means invalid/unknown.
118 * The hw_idx index is between -1 (unknown) and max depth,
138 int idx; /* index in shared_regs->regs[] */
144 * PERF_EVENT_FLAG_ARCH bits are reserved for architecture-specific
153 * struct hw_perf_event - performance event hardware details:
171 struct { /* aux / Intel-PT */
178 /* for tp_event->class */
223 #define PERF_HES_UPTODATE 0x02 /* event->count up-to-date */
316 * struct pmu - generic performance monitoring unit
330 * various common per-pmu feature flags
359 * -ENOENT -- @event is not for this PMU
361 * -ENODEV -- @event is for this PMU but PMU not present
362 * -EBUSY -- @event is for this PMU but PMU temporarily unavailable
363 * -EINVAL -- @event is for this PMU but @event is not valid
364 * -EOPNOTSUPP -- @event is for this PMU, @event is valid, but not supported
365 * -EACCES -- @event is for this PMU, @event is valid, but no privileges
367 * 0 -- @event is for this PMU and valid
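
The return-code convention above is easiest to see in a driver's event_init callback. Below is a minimal sketch for an imaginary "widget" PMU; the widget name is invented, while the helpers used (is_sampling_event(), event_has_any_exclude_flag()) are the ones declared later in this header:

static int widget_pmu_event_init(struct perf_event *event)
{
	/* Not one of ours: tell the core to try the next PMU. */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Ours and well-formed, but this sketch cannot do sampling. */
	if (is_sampling_event(event))
		return -EOPNOTSUPP;

	/* Exclusion bits this hypothetical hardware cannot honour. */
	if (event_has_any_exclude_flag(event))
		return -EINVAL;

	return 0;	/* event is for this PMU and valid */
}
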
381 * Flags for ->add()/->del()/->start()/->stop(). There are
390 * transaction, see the ->*_txn() methods.
399 * ->add() called without PERF_EF_START should result in the same state
400 * as ->add() followed by ->stop().
402 * ->del() must always PERF_EF_UPDATE stop an event. If it calls
403 * ->stop() that must deal with already being stopped without
413 * returns !0. ->start() will be used to continue.
418 * is on -- will be called from NMI context when the PMU generates
421 * ->stop() with PERF_EF_UPDATE will read the counter and update
422 * period/count values like ->read() would.
424 * ->start() with PERF_EF_RELOAD will reprogram the counter
425 * value, must be preceded by a ->stop() with PERF_EF_UPDATE.
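
Put together, the PERF_EF_UPDATE/PERF_EF_RELOAD rules above give ->stop()/->start() pairs of roughly this shape. A hedged sketch: the widget_* counter accessors are invented placeholders, while the hw_perf_event fields, the PERF_HES_*/PERF_EF_* flags and local64_*() are real:

static void widget_pmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	widget_disable_counter(hwc->idx);	/* assumed hardware hook */
	hwc->state |= PERF_HES_STOPPED;

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/* Fold the hardware value into event->count, like ->read(). */
		local64_add(widget_read_counter(hwc->idx), &event->count);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void widget_pmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		widget_write_counter(hwc->idx, local64_read(&hwc->prev_count));

	hwc->state = 0;
	widget_enable_counter(hwc->idx);	/* assumed hardware hook */
}
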
443 * Start the transaction, after this ->add() doesn't need to
450 * If ->start_txn() disabled the ->add() schedulability test
451 * then ->commit_txn() is required to perform one. On success
453 * open until ->cancel_txn() is called.
459 * Will cancel the transaction, assumes ->del() is called
460 * for each successful ->add() during the transaction.
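
The group-scheduling path is the main caller of this transaction interface. A simplified sketch of the pattern (not the actual group_sched_in() code; the real error path also ->del()s the events that were already added, which is elided here):

static int sketch_group_sched_in(struct perf_event *leader, struct pmu *pmu)
{
	struct perf_event *sibling;

	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);

	if (pmu->add(leader, PERF_EF_START))
		goto error;

	for_each_sibling_event(sibling, leader) {
		if (pmu->add(sibling, PERF_EF_START))
			goto error;
	}

	if (!pmu->commit_txn(pmu))	/* schedulability test passed */
		return 0;

error:
	pmu->cancel_txn(pmu);		/* roll back the whole group */
	return -EAGAIN;
}
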
474 * context-switches callback
485 * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
495 * Set up pmu-private data structures for an AUX area
502 * Free pmu-private AUX data structures
508 * state, so that preempting ->start()/->stop() callbacks does
522 * supplied filters are valid, -errno otherwise.
524 * Runs in the context of the ioctl()ing process and is not serialized
532 * translate hw-agnostic filters into hardware configuration in
535 * Runs as part of the filter sync sequence that is done in ->start()
549 * or non-zero for "match".
573 * struct perf_addr_filter - address range filter definition
575 * @path: object file's path for file-based filters
580 * This is a hardware-agnostic filter configuration as specified by the user.
591 * struct perf_addr_filters_head - container for address range filters
595 * @nr_file_filters: number of file-based filters
612 * enum perf_event_state - the states of an event:
615 PERF_EVENT_STATE_DEAD = -4,
616 PERF_EVENT_STATE_EXIT = -3,
617 PERF_EVENT_STATE_ERROR = -2,
618 PERF_EVENT_STATE_OFF = -1,
673 * event->sibling_list is modified while holding both ctx->lock and ctx->mutex
674 * as such iteration must hold either lock. However, since ctx->lock is an IRQ
676 * disabled is sufficient since it will hold-off the IPIs.
682 lockdep_is_held(&(event)->ctx->mutex) != LOCK_STATE_HELD))
689 if ((event)->group_leader == (event)) \
690 list_for_each_entry((sibling), &(event)->sibling_list, sibling_list)
693 * struct perf_event - performance event kernel representation:
699 * modifications require ctx->lock
705 * Locked for modification by both ctx->mutex and ctx->lock; holding
726 /* Not serialized. Only written during event initialization. */
734 * event->pmu will always point to pmu in which this event belongs.
735 * Whereas event->pmu_ctx->pmu may point to other pmu when group of
749 * been scheduled in, if this is a per-task event)
764 * event->pmu_ctx points to perf_event_pmu_context in which the event
766 * sw event is part of a group which also contains non-sw events.
819 /* vma address array for file-based filters */
858 * Certain events get forwarded to another pmu internally by over-
859 * writing the kernel copy of event->attr.type without the user being aware
860 * of it. event->orig_type contains the original 'type' requested by
868 * ,-----------------------[1:n]------------------------.
870 * perf_event_context <-[1:n]-> perf_event_pmu_context <-[1:n]- perf_event
872 * `--[n:1]-> pmu <-[1:n]--'
879 * modification, both: ctx->mutex && ctx->lock
880 * reading, either: ctx->mutex || ctx->lock
883 * with ctx->mutex held; this means that as long as we can guarantee the epc
887 * ctx->mutex pinning the configuration. Since we hold a reference on
889 * associated pmu_ctx must exist and cannot change due to ctx->mutex.
903 /* Used to avoid freeing per-cpu perf_event_pmu_context */
910 atomic_t refcount; /* event <-> epc */
925 return !list_empty(&epc->flexible_active) || !list_empty(&epc->pinned_active); in perf_pmu_ctx_is_active()
935 * struct perf_event_context - event context structure
966 refcount_t refcount; /* event <-> ctx */
990 * The count of events for which using the switch-out fast path
993 * Sum (event->pending_work + events with
994 * (attr->inherit && (attr->sample_type & PERF_SAMPLE_READ)))
996 * The SIGTRAP is targeted at ctx->task, as such it won't do changing
1019 * struct perf_event_cpu_context - per cpu event context structure
1031 * Per-CPU storage for iterators used in visit_groups_merge. The default
1062 * This is a per-cpu dynamically allocated data structure.
1085 ctx ? lockdep_is_held(&ctx->lock) in perf_cgroup_from_task()
1149 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS; in branch_sample_no_flags()
1154 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES; in branch_sample_no_cycles()
1159 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_TYPE_SAVE; in branch_sample_type()
1164 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX; in branch_sample_hw_index()
1169 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_PRIV_SAVE; in branch_sample_priv()
1174 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS; in branch_sample_counters()
1179 return event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK; in branch_sample_call_stack()
1245 data->sample_flags = PERF_SAMPLE_PERIOD; in perf_sample_data_init()
1246 data->period = period; in perf_sample_data_init()
1247 data->dyn_size = 0; in perf_sample_data_init()
1250 data->addr = addr; in perf_sample_data_init()
1251 data->sample_flags |= PERF_SAMPLE_ADDR; in perf_sample_data_init()
1261 data->callchain = perf_callchain(event, regs); in perf_sample_save_callchain()
1262 size += data->callchain->nr; in perf_sample_save_callchain()
1264 data->dyn_size += size * sizeof(u64); in perf_sample_save_callchain()
1265 data->sample_flags |= PERF_SAMPLE_CALLCHAIN; in perf_sample_save_callchain()
1271 struct perf_raw_frag *frag = &raw->frag; in perf_sample_save_raw_data()
1276 sum += frag->size; in perf_sample_save_raw_data()
1279 frag = frag->next; in perf_sample_save_raw_data()
1283 raw->size = size - sizeof(u32); in perf_sample_save_raw_data()
1284 frag->pad = raw->size - sum; in perf_sample_save_raw_data()
1286 data->raw = raw; in perf_sample_save_raw_data()
1287 data->dyn_size += size; in perf_sample_save_raw_data()
1288 data->sample_flags |= PERF_SAMPLE_RAW; in perf_sample_save_raw_data()
1300 size += brs->nr * sizeof(struct perf_branch_entry); in perf_sample_save_brstack()
1308 size += brs->nr * sizeof(u64); in perf_sample_save_brstack()
1310 data->br_stack = brs; in perf_sample_save_brstack()
1311 data->br_stack_cntr = brs_cntr; in perf_sample_save_brstack()
1312 data->dyn_size += size; in perf_sample_save_brstack()
1313 data->sample_flags |= PERF_SAMPLE_BRANCH_STACK; in perf_sample_save_brstack()
1321 size += event->header_size + event->id_header_size; in perf_sample_data_size()
1322 size += data->dyn_size; in perf_sample_data_size()
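
A typical consumer of these sample-data helpers is a PMU overflow handler. A hedged sketch follows; the widget naming is invented, while perf_sample_data_init(), perf_sample_save_callchain() and perf_event_overflow() are the real interfaces:

static void widget_handle_overflow(struct perf_event *event,
				   struct pt_regs *regs, u64 period)
{
	struct perf_sample_data data;

	perf_sample_data_init(&data, 0, period);

	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
		perf_sample_save_callchain(&data, event, regs);

	/* Let the core decide whether the event must be stopped. */
	if (perf_event_overflow(event, &data, regs))
		event->pmu->stop(event, 0);
}
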
1334 br->mispred = 0; in perf_clear_branch_entry_bitfields()
1335 br->predicted = 0; in perf_clear_branch_entry_bitfields()
1336 br->in_tx = 0; in perf_clear_branch_entry_bitfields()
1337 br->abort = 0; in perf_clear_branch_entry_bitfields()
1338 br->cycles = 0; in perf_clear_branch_entry_bitfields()
1339 br->type = 0; in perf_clear_branch_entry_bitfields()
1340 br->spec = PERF_BR_SPEC_NA; in perf_clear_branch_entry_bitfields()
1341 br->reserved = 0; in perf_clear_branch_entry_bitfields()
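
A branch-stack producer typically clears the bitfields first and then sets only the bits it actually knows. A small sketch (the helper name is invented; the perf_branch_entry fields are real):

static void sketch_fill_branch_entry(struct perf_branch_entry *br,
				     u64 from, u64 to, bool mispredicted)
{
	perf_clear_branch_entry_bitfields(br);

	br->from      = from;
	br->to        = to;
	br->mispred   = mispredicted;
	br->predicted = !mispredicted;
	/* everything else stays 0 / PERF_BR_SPEC_NA from the clear above */
}
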
1373 perf_overflow_handler_t overflow_handler = event->overflow_handler; in is_default_overflow_handler()
1396 struct perf_event_attr *attr = &event->attr; in event_has_any_exclude_flag()
1398 return attr->exclude_idle || attr->exclude_user || in event_has_any_exclude_flag()
1399 attr->exclude_kernel || attr->exclude_hv || in event_has_any_exclude_flag()
1400 attr->exclude_guest || attr->exclude_host; in event_has_any_exclude_flag()
1405 return event->attr.sample_period != 0; in is_sampling_event()
1413 return event->event_caps & PERF_EV_CAP_SOFTWARE; in is_software_event()
1421 return event->pmu_ctx->pmu->task_ctx_nr == perf_sw_context; in in_software_context()
1426 return pmu->capabilities & PERF_PMU_CAP_EXCLUSIVE; in is_exclusive_pmu()
1439 * When generating a perf sample in-line, instead of from an interrupt /
1441 * like: SW_CONTEXT_SWITCHES, SW_MIGRATIONS and the tie-in with tracepoints.
1444 * - ip for PERF_SAMPLE_IP
1445 * - cs for user_mode() tests
1446 * - sp for PERF_SAMPLE_CALLCHAIN
1447 * - eflags for MISC bits and CALLCHAIN (see: perf_hw_regs())
1489 task->sched_migrated = 1; in perf_event_task_migrate()
1499 task->sched_migrated) { in perf_event_task_sched_in()
1501 task->sched_migrated = 0; in perf_event_task_sched_in()
1533 DECLARE_STATIC_CALL(__perf_guest_state, *perf_guest_cbs->state);
1534 DECLARE_STATIC_CALL(__perf_guest_get_ip, *perf_guest_cbs->get_ip);
1535 DECLARE_STATIC_CALL(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->handle_intel_pt_intr);
1583 if (ctx->contexts < sysctl_perf_event_max_contexts_per_stack) { in perf_callchain_store_context()
1584 struct perf_callchain_entry *entry = ctx->entry; in perf_callchain_store_context()
1585 entry->ip[entry->nr++] = ip; in perf_callchain_store_context()
1586 ++ctx->contexts; in perf_callchain_store_context()
1589 ctx->contexts_maxed = true; in perf_callchain_store_context()
1590 return -1; /* no more room, stop walking the stack */ in perf_callchain_store_context()
1596 if (ctx->nr < ctx->max_stack && !ctx->contexts_maxed) { in perf_callchain_store()
1597 struct perf_callchain_entry *entry = ctx->entry; in perf_callchain_store()
1598 entry->ip[entry->nr++] = ip; in perf_callchain_store()
1599 ++ctx->nr; in perf_callchain_store()
1602 return -1; /* no more room, stop walking the stack */ in perf_callchain_store()
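
Architecture callchain code is the consumer of perf_callchain_store(): it walks stack frames and stops as soon as the helper reports that the entry is full. A hedged sketch, with next_frame() as an invented placeholder for the arch-specific unwinder step; instruction_pointer() and perf_callchain_store() are real:

void sketch_perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	do {
		if (perf_callchain_store(entry, pc))
			return;			/* no more room, stop walking */
	} while (next_frame(&pc));		/* invented unwinder step */
}
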
1630 return sysctl_perf_event_paranoid > -1; in perf_is_paranoid()
1638 return -EACCES; in perf_allow_cpu()
1645 if (sysctl_perf_event_paranoid > -1 && !perfmon_capable()) in perf_allow_tracepoint()
1646 return -EPERM; in perf_allow_tracepoint()
1669 return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK; in has_branch_stack()
1674 return event->attr.branch_sample_type != 0; in needs_branch_stack()
1679 return event->pmu->setup_aux; in has_aux()
1684 return !!event->attr.write_backward; in is_write_backward()
1689 return event->pmu->nr_addr_filters; in has_addr_filter()
1698 struct perf_addr_filters_head *ifh = &event->addr_filters; in perf_event_addr_filters()
1700 if (event->parent) in perf_event_addr_filters()
1701 ifh = &event->parent->addr_filters; in perf_event_addr_filters()
1709 if (event->parent) in perf_event_fasync()
1710 event = event->parent; in perf_event_fasync()
1711 return &event->fasync; in perf_event_fasync()
1757 unsigned long size) { return -EINVAL; } in perf_aux_output_skip()
1773 static inline struct file *perf_event_get(unsigned int fd) { return ERR_PTR(-EINVAL); } in perf_event_get()
1776 return ERR_PTR(-EINVAL); in perf_get_event()
1780 return ERR_PTR(-EINVAL); in perf_event_attrs()
1785 return -EINVAL; in perf_event_read_local()
1788 static inline int perf_event_task_disable(void) { return -EINVAL; } in perf_event_task_disable()
1789 static inline int perf_event_task_enable(void) { return -EINVAL; } in perf_event_task_enable()
1792 return -EINVAL; in perf_event_refresh()
1818 static inline int perf_swevent_get_recursion_context(void) { return -1; } in perf_swevent_get_recursion_context()
1823 static inline int __perf_event_disable(void *info) { return -1; } in __perf_event_disable()
1828 return -EINVAL; in perf_event_period()