Lines matching int, map and mask in tools/lib/perf/mmap.c (libperf)

// SPDX-License-Identifier: GPL-2.0

void perf_mmap__init(struct perf_mmap *map, struct perf_mmap *prev,
		     bool overwrite, libperf_unmap_cb_t unmap_cb)
{
	map->fd = -1;
	map->overwrite = overwrite;
	map->unmap_cb = unmap_cb;
	refcount_set(&map->refcnt, 0);
	if (prev)
		prev->next = map;
}

size_t perf_mmap__mmap_len(struct perf_mmap *map)
{
	/* Data area (mask + 1 bytes) plus one leading control page. */
	return map->mask + 1 + page_size;
}

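The mask is the power-of-two data-area size in bytes minus one, so the full mapping is the data pages plus one control page. A minimal sketch of how a caller might fill in struct perf_mmap_param; the helper name is hypothetical, not part of libperf:

/* Hypothetical helper: parameters for a ring with nr_pages data pages
 * (nr_pages must be a power of two). */
static void setup_mmap_param(struct perf_mmap_param *mp, int nr_pages)
{
	mp->prot = PROT_READ | PROT_WRITE;
	mp->mask = nr_pages * page_size - 1;
}
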
int perf_mmap__mmap(struct perf_mmap *map, struct perf_mmap_param *mp,
		    int fd, struct perf_cpu cpu)
{
	map->prev = 0;
	map->mask = mp->mask;
	map->base = mmap(NULL, perf_mmap__mmap_len(map), mp->prot,
			 MAP_SHARED, fd, 0);
	if (map->base == MAP_FAILED) {
		map->base = NULL;
		return -1;
	}

	map->fd = fd;
	map->cpu = cpu;
	return 0;
}

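Putting the two together, a sketch of mapping a single event, assuming fd comes from perf_event_open(2) and the hypothetical setup_mmap_param() above:

static int map_one_event(struct perf_mmap *map, int fd, struct perf_cpu cpu)
{
	struct perf_mmap_param mp;

	setup_mmap_param(&mp, 8);		 /* 8 data pages + 1 control page */
	perf_mmap__init(map, NULL, false, NULL); /* forward ring, no unmap callback */
	return perf_mmap__mmap(map, &mp, fd, cpu);
}
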
void perf_mmap__munmap(struct perf_mmap *map)
{
	if (!map)
		return;

	zfree(&map->event_copy);
	map->event_copy_sz = 0;
	if (map->base) {
		munmap(map->base, perf_mmap__mmap_len(map));
		map->base = NULL;
		map->fd = -1;
		refcount_set(&map->refcnt, 0);
	}
	if (map->unmap_cb)
		map->unmap_cb(map);
}

void perf_mmap__get(struct perf_mmap *map)
{
	refcount_inc(&map->refcnt);
}

void perf_mmap__put(struct perf_mmap *map)
{
	BUG_ON(map->base && refcount_read(&map->refcnt) == 0);

	if (refcount_dec_and_test(&map->refcnt))
		perf_mmap__munmap(map);
}

void perf_mmap__write_tail(struct perf_mmap *md, u64 tail)
{
	ring_buffer_write_tail(md->base, tail);
}

u64 perf_mmap__read_head(struct perf_mmap *map)
{
	return ring_buffer_read_head(map->base);
}

static bool perf_mmap__empty(struct perf_mmap *map)
{
	struct perf_event_mmap_page *pc = map->base;

	return perf_mmap__read_head(map) == map->prev && !pc->aux_size;
}

void perf_mmap__consume(struct perf_mmap *map)
{
	if (!map->overwrite) {
		u64 old = map->prev;

		perf_mmap__write_tail(map, old);
	}

	if (refcount_read(&map->refcnt) == 1 && perf_mmap__empty(map))
		perf_mmap__put(map);
}

static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end)
{
	struct perf_event_header *pheader;
	u64 evt_head = *start;
	int size = mask + 1;

	pheader = (struct perf_event_header *)(buf + (*start & mask));
	while (true) {
		if (evt_head - *start >= (unsigned int)size) {
			/* Walked a full buffer: back off the last record. */
			if (evt_head - *start > (unsigned int)size)
				evt_head -= pheader->size;
			*end = evt_head;
			return 0;
		}
		pheader = (struct perf_event_header *)(buf + (evt_head & mask));
		if (pheader->size == 0) {
			*end = evt_head;
			return 0;
		}
		evt_head += pheader->size;
	}
	return -1;
}

static int __perf_mmap__read_init(struct perf_mmap *md)
{
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;

	md->start = md->overwrite ? head : old;
	md->end = md->overwrite ? old : head;

	if ((md->end - md->start) < md->flush)
		return -EAGAIN;

	size = md->end - md->start;
	if (size > (unsigned long)(md->mask) + 1) {
		if (!md->overwrite) {
			/* Reader fell behind; drop this batch and resync. */
			md->prev = head;
			perf_mmap__consume(md);
			return -EAGAIN;
		}

		/* Full backward ring: recover a readable [start, end). */
		if (overwrite_rb_find_range(data, md->mask, &md->start, &md->end))
			return -EINVAL;
	}

	return 0;
}

int perf_mmap__read_init(struct perf_mmap *map)
{
	/* Bail out if the event was unmapped (POLLHUP/POLLERR). */
	if (!refcount_read(&map->refcnt))
		return -ENOENT;

	return __perf_mmap__read_init(map);
}

/*
 * Mandatory for overwrite mode. Overwrite mode reads backward: the last
 * perf_mmap__read() set tail to map->core.prev, so map->core.prev must be
 * corrected to head, which is where the next read ends.
 */
void perf_mmap__read_done(struct perf_mmap *map)
{
	/* Bail out if the event was unmapped (POLLHUP/POLLERR). */
	if (!refcount_read(&map->refcnt))
		return;

	map->prev = perf_mmap__read_head(map);
}

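Overwrite (backward) rings are requested at event-open time, not in this file. A minimal sketch of the attr side, assuming the usual perf_event_open(2) flow; the helper name is hypothetical:

/* Hypothetical helper: ask the kernel to write this event's ring backward.
 * Pair it with a PROT_READ-only mapping so data_tail is never advanced and
 * the kernel keeps overwriting the oldest records. */
static void make_overwrite_attr(struct perf_event_attr *attr)
{
	attr->write_backward = 1;
}
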
static union perf_event *perf_mmap__read(struct perf_mmap *map,
					 u64 *startp, u64 end)
{
	unsigned char *data = map->base + page_size;
	union perf_event *event = NULL;
	int diff = end - *startp;

	if (diff >= (int)sizeof(event->header)) {
		size_t size;

		event = (union perf_event *)&data[*startp & map->mask];
		size = event->header.size;

		if (size < sizeof(event->header) || diff < (int)size)
			return NULL;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((*startp & map->mask) + size != ((*startp + size) & map->mask)) {
			unsigned int offset = *startp;
			unsigned int len = size, cpy;
			void *dst = map->event_copy;

			/* Grow the bounce buffer if this record will not fit. */
			if (size > map->event_copy_sz) {
				dst = realloc(map->event_copy, size);
				if (!dst)
					return NULL;
				map->event_copy = dst;
				map->event_copy_sz = size;
			}
			/* Copy both halves into contiguous storage. */
			do {
				cpy = min(map->mask + 1 - (offset & map->mask), len);
				memcpy(dst, &data[offset & map->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);
			event = (union perf_event *)map->event_copy;
		}
		*startp += size;
	}
	return event;
}

union perf_event *perf_mmap__read_event(struct perf_mmap *map)
{
	union perf_event *event;

	/* Bail out if the event was unmapped (POLLHUP/POLLERR). */
	if (!refcount_read(&map->refcnt))
		return NULL;

	/* non-overwrite doesn't pause the ringbuffer */
	if (!map->overwrite)
		map->end = perf_mmap__read_head(map);

	event = perf_mmap__read(map, &map->start, map->end);

	if (!map->overwrite)
		map->prev = map->start;

	return event;
}

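Taken together, these functions form the usual libperf consumption loop. A minimal sketch, assuming map is an already-mapped ring (e.g. obtained via perf_evlist__next_mmap()):

static void drain_ring(struct perf_mmap *map)
{
	union perf_event *event;

	if (perf_mmap__read_init(map) < 0)
		return;				/* nothing to read, or unmapped */

	while ((event = perf_mmap__read_event(map)) != NULL) {
		/* ... dispatch on event->header.type here ... */
		perf_mmap__consume(map);	/* publish the new data_tail */
	}
	perf_mmap__read_done(map);		/* resync prev (overwrite mode) */
}
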
#if defined(__i386__) || defined(__x86_64__)
static u64 read_perf_counter(unsigned int counter)
{
	unsigned int low, high;

	asm volatile("rdpmc" : "=a" (low), "=d" (high) : "c" (counter));
	return low | ((u64)high) << 32;
}
/* read_timestamp() is the same pattern built on rdtsc; the aarch64 and
 * other per-arch readers are elided from this match listing. */
#elif defined(__riscv) && __riscv_xlen == 64
/* __riscv_xlen contains the width of the native base integer, here 64-bit */
static unsigned long csr_read_num(int csr_num);	    /* switch over CSR numbers; body elided */
static u64 read_perf_counter(unsigned int counter); /* reads CSR_CYCLE + counter; body elided */
#else
static u64 read_perf_counter(unsigned int counter __maybe_unused) { return 0; }
#endif

int perf_mmap__read_self(struct perf_mmap *map, struct perf_counts_values *count)
{
	struct perf_event_mmap_page *pc = map->base;
	u32 seq, idx, time_mult = 0, time_shift = 0;
	u64 cnt, cyc = 0, time_offset = 0, time_cycles = 0, time_mask = ~0ULL;

	if (!pc || !pc->cap_user_rdpmc)
		return -1;

	/* Seqlock: retry until pc->lock is stable across the reads. */
	do {
		seq = READ_ONCE(pc->lock);
		barrier();

		count->ena = READ_ONCE(pc->time_enabled);
		count->run = READ_ONCE(pc->time_running);

		if (pc->cap_user_time && count->ena != count->run) {
			cyc = read_timestamp();
			time_mult = READ_ONCE(pc->time_mult);
			time_shift = READ_ONCE(pc->time_shift);
			time_offset = READ_ONCE(pc->time_offset);

			if (pc->cap_user_time_short) {
				time_cycles = READ_ONCE(pc->time_cycles);
				time_mask = READ_ONCE(pc->time_mask);
			}
		}

		idx = READ_ONCE(pc->index);
		cnt = READ_ONCE(pc->offset);
		if (pc->cap_user_rdpmc && idx) {
			s64 evcnt = read_perf_counter(idx - 1);
			u16 width = READ_ONCE(pc->pmc_width);

			/* Sign-extend the raw PMC value to 64 bits. */
			evcnt <<= 64 - width;
			evcnt >>= 64 - width;
			cnt += evcnt;
		} else
			return -1;

		barrier();
	} while (READ_ONCE(pc->lock) != seq);

	if (count->ena != count->run) {
		u64 delta;

		/* Adjust for cap_user_time_short, a nop if not. */
		cyc = time_cycles + ((cyc - time_cycles) & time_mask);

		delta = time_offset + mul_u64_u32_shr(cyc, time_mult, time_shift);

		count->ena += delta;
		if (idx)
			count->run += delta;
	}

	count->val = cnt;

	return 0;
}
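
The delta computed above follows the cycles-to-nanoseconds conversion documented in include/uapi/linux/perf_event.h. A standalone sketch of the same arithmetic without the kernel's mul_u64_u32_shr() helper:

#include <stdint.h>

/* Split the multiply so (cyc * time_mult) cannot overflow 64 bits. */
static uint64_t cycles_to_ns(uint64_t cyc, uint32_t time_mult,
			     uint16_t time_shift, uint64_t time_offset)
{
	uint64_t quot = cyc >> time_shift;
	uint64_t rem  = cyc & (((uint64_t)1 << time_shift) - 1);

	return time_offset + quot * time_mult +
	       ((rem * time_mult) >> time_shift);
}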