Lines Matching +full:int +full:- +full:map +full:- +full:mask
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2011-2017, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
5 * Parts came from evlist.c builtin-{top,stat,record}.c, see those files for further
29 void mmap_cpu_mask__scnprintf(struct mmap_cpu_mask *mask, const char *tag)
34 len = bitmap_scnprintf(mask->bits, mask->nbits, buf, MASK_SIZE);
36 pr_debug("%p: %s mask[%zd]: %s\n", mask, tag, mask->nbits, buf);
39 size_t mmap__mmap_len(struct mmap *map)
41 return perf_mmap__mmap_len(&map->core);
44 int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
47 int fd __maybe_unused)
58 unsigned int auxtrace_pages __maybe_unused, /* in auxtrace_mmap_params__init() */
66 int idx __maybe_unused) /* in auxtrace_mmap_params__set_idx() */
71 static int perf_mmap__aio_enabled(struct mmap *map)
73 return map->aio.nr_cblocks > 0;
77 static int perf_mmap__aio_alloc(struct mmap *map, int idx)
79 map->aio.data[idx] = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
81 if (map->aio.data[idx] == MAP_FAILED) {
82 map->aio.data[idx] = NULL;
83 return -1;
89 static void perf_mmap__aio_free(struct mmap *map, int idx)
91 if (map->aio.data[idx]) {
92 munmap(map->aio.data[idx], mmap__mmap_len(map));
93 map->aio.data[idx] = NULL;
97 static int perf_mmap__aio_bind(struct mmap *map, int idx, struct perf_cpu cpu, int affinity)
103 int err = 0;
106 data = map->aio.data[idx];
107 mmap_len = mmap__mmap_len(map);
111 pr_err("Failed to allocate node mask for mbind: error %m\n");
112 return -1;
116 pr_err("Failed to bind [%p-%p] AIO buffer to node %lu: error %m\n",
118 err = -1;
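The pr_err() strings above belong to the libnuma-enabled variant of perf_mmap__aio_bind(), which pins each AIO buffer to the NUMA node of the mmap's CPU by building a one-node mask and calling mbind(2). Below is a minimal standalone sketch of that technique, not the verbatim perf code; bind_buf_to_node() is an illustrative name, and it assumes the node number fits in one unsigned long of mask bits.

#include <numaif.h>     /* mbind(), MPOL_BIND; link with -lnuma */
#include <stddef.h>
#include <stdio.h>

/* Bind an already-mmap'ed anonymous buffer to a single NUMA node. */
static int bind_buf_to_node(void *buf, size_t len, unsigned long node)
{
	unsigned long node_mask = 1UL << node;   /* assumes node < BITS_PER_LONG */

	/* maxnode is the number of bits the kernel may read from &node_mask;
	 * node + 2 comfortably covers bit 'node' (perf uses node + 1 + 1). */
	if (mbind(buf, len, MPOL_BIND, &node_mask, node + 2, 0)) {
		fprintf(stderr, "failed to bind buffer to node %lu: %m\n", node);
		return -1;
	}
	return 0;
}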
126 static int perf_mmap__aio_alloc(struct mmap *map, int idx)
128 map->aio.data[idx] = malloc(mmap__mmap_len(map));
129 if (map->aio.data[idx] == NULL)
130 return -1;
135 static void perf_mmap__aio_free(struct mmap *map, int idx)
137 zfree(&(map->aio.data[idx]));
140 static int perf_mmap__aio_bind(struct mmap *map __maybe_unused, int idx __maybe_unused,
141 struct perf_cpu cpu __maybe_unused, int affinity __maybe_unused)
147 static int perf_mmap__aio_mmap(struct mmap *map, struct mmap_params *mp)
149 int delta_max, i, prio, ret;
151 map->aio.nr_cblocks = mp->nr_cblocks;
152 if (map->aio.nr_cblocks) {
153 map->aio.aiocb = calloc(map->aio.nr_cblocks, sizeof(struct aiocb *));
154 if (!map->aio.aiocb) {
156 return -1;
158 map->aio.cblocks = calloc(map->aio.nr_cblocks, sizeof(struct aiocb));
159 if (!map->aio.cblocks) {
161 return -1;
163 map->aio.data = calloc(map->aio.nr_cblocks, sizeof(void *));
164 if (!map->aio.data) {
166 return -1;
169 for (i = 0; i < map->aio.nr_cblocks; ++i) {
170 ret = perf_mmap__aio_alloc(map, i);
171 if (ret == -1) {
173 return -1;
175 ret = perf_mmap__aio_bind(map, i, map->core.cpu, mp->affinity);
176 if (ret == -1)
177 return -1;
179 * Use cblock.aio_fildes value different from -1
184 map->aio.cblocks[i].aio_fildes = -1;
188 * are kept in separate per-prio queues and adding
189 * a new request will iterate thru shorter per-prio
193 prio = delta_max - i;
194 map->aio.cblocks[i].aio_reqprio = prio >= 0 ? prio : 0;
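The loop above parks each POSIX AIO control block with aio_fildes = -1 (meaning "no write in flight") and gives later blocks a lower aio_reqprio, so earlier requests are preferred when the AIO implementation keeps per-priority queues. A self-contained sketch of that initialization pattern follows; alloc_cblocks() is a made-up name, and delta_max mirrors the sysconf(_SC_AIO_PRIO_DELTA_MAX) limit used by perf.

#include <aio.h>
#include <stdlib.h>
#include <unistd.h>

static struct aiocb *alloc_cblocks(int nr)
{
	long delta_max = sysconf(_SC_AIO_PRIO_DELTA_MAX);
	struct aiocb *cb = calloc(nr, sizeof(*cb));

	if (!cb)
		return NULL;

	for (int i = 0; i < nr; i++) {
		long prio = delta_max - i;

		cb[i].aio_fildes = -1;                    /* "not in flight" marker */
		cb[i].aio_reqprio = prio >= 0 ? prio : 0; /* later blocks get lower priority */
	}
	return cb;
}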
201 static void perf_mmap__aio_munmap(struct mmap *map)
203 int i;
205 for (i = 0; i < map->aio.nr_cblocks; ++i)
206 perf_mmap__aio_free(map, i);
207 if (map->aio.data)
208 zfree(&map->aio.data);
209 zfree(&map->aio.cblocks);
210 zfree(&map->aio.aiocb);
213 static int perf_mmap__aio_enabled(struct mmap *map __maybe_unused)
218 static int perf_mmap__aio_mmap(struct mmap *map __maybe_unused,
224 static void perf_mmap__aio_munmap(struct mmap *map __maybe_unused)
229 void mmap__munmap(struct mmap *map)
231 bitmap_free(map->affinity_mask.bits);
233 zstd_fini(&map->zstd_data);
235 perf_mmap__aio_munmap(map);
236 if (map->data != NULL) {
237 munmap(map->data, mmap__mmap_len(map));
238 map->data = NULL;
240 auxtrace_mmap__munmap(&map->auxtrace_mmap);
243 static void build_node_mask(int node, struct mmap_cpu_mask *mask)
245 int idx, nr_cpus;
255 cpu = perf_cpu_map__cpu(cpu_map, idx); /* map c index to online cpu index */
257 __set_bit(cpu.cpu, mask->bits);
261 static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
263 map->affinity_mask.nbits = cpu__max_cpu().cpu;
264 map->affinity_mask.bits = bitmap_zalloc(map->affinity_mask.nbits);
265 if (!map->affinity_mask.bits)
266 return -1;
268 if (mp->affinity == PERF_AFFINITY_NODE && cpu__max_node() > 1)
269 build_node_mask(cpu__get_node(map->core.cpu), &map->affinity_mask);
270 else if (mp->affinity == PERF_AFFINITY_CPU)
271 __set_bit(map->core.cpu.cpu, map->affinity_mask.bits);
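perf_mmap__setup_affinity_mask() sizes the bitmap to the highest possible CPU number and then sets either all CPUs of the buffer's NUMA node or just the buffer's own CPU, depending on the affinity mode. The mask is not consumed in this file; a bitmap like this is typically handed to sched_setaffinity(2) so the reading thread runs close to the ring buffer. A generic sketch of applying such a bitmap is shown below; apply_cpu_mask() is an illustrative name, not a perf API.

#define _GNU_SOURCE
#include <sched.h>      /* sched_setaffinity(), cpu_set_t */
#include <stddef.h>
#include <stdio.h>

/* Pin the calling thread to the CPUs set in 'bits', a bitmap with 'nbits'
 * valid bits (mirroring struct mmap_cpu_mask). */
static int apply_cpu_mask(const unsigned long *bits, size_t nbits)
{
	/* The syscall wants a length that is a multiple of sizeof(unsigned long). */
	size_t bytes = ((nbits + 8 * sizeof(unsigned long) - 1) /
			(8 * sizeof(unsigned long))) * sizeof(unsigned long);

	if (sched_setaffinity(0, bytes, (const cpu_set_t *)bits)) {
		fprintf(stderr, "sched_setaffinity failed: %m\n");
		return -1;
	}
	return 0;
}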
276 int mmap__mmap(struct mmap *map, struct mmap_params *mp, int fd, struct perf_cpu cpu)
278 if (perf_mmap__mmap(&map->core, &mp->core, fd, cpu)) {
281 return -1;
284 if (mp->affinity != PERF_AFFINITY_SYS &&
285 perf_mmap__setup_affinity_mask(map, mp)) {
286 pr_debug2("failed to alloc mmap affinity mask, error %d\n",
288 return -1;
292 mmap_cpu_mask__scnprintf(&map->affinity_mask, "mmap");
294 map->core.flush = mp->flush;
296 if (zstd_init(&map->zstd_data, mp->comp_level)) {
298 return -1;
301 if (mp->comp_level && !perf_mmap__aio_enabled(map)) {
302 map->data = mmap(NULL, mmap__mmap_len(map), PROT_READ|PROT_WRITE,
304 if (map->data == MAP_FAILED) {
307 map->data = NULL;
308 return -1;
312 if (auxtrace_mmap__mmap(&map->auxtrace_mmap,
313 &mp->auxtrace_mp, map->core.base, fd))
314 return -1;
316 return perf_mmap__aio_mmap(map, mp);
319 int perf_mmap__push(struct mmap *md, void *to,
320 int push(struct mmap *map, void *to, void *buf, size_t size))
322 u64 head = perf_mmap__read_head(&md->core);
323 unsigned char *data = md->core.base + page_size;
326 int rc = 0;
328 rc = perf_mmap__read_init(&md->core);
330 return (rc == -EAGAIN) ? 1 : -1;
332 size = md->core.end - md->core.start;
334 if ((md->core.start & md->core.mask) + size != (md->core.end & md->core.mask)) {
335 buf = &data[md->core.start & md->core.mask];
336 size = md->core.mask + 1 - (md->core.start & md->core.mask);
337 md->core.start += size;
340 rc = -1;
345 buf = &data[md->core.start & md->core.mask];
346 size = md->core.end - md->core.start;
347 md->core.start += size;
350 rc = -1;
354 md->core.prev = head;
355 perf_mmap__consume(&md->core);
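In perf_mmap__push() above, the test (start & mask) + size != (end & mask) detects when the pending data crosses the end of the power-of-two ring buffer, in which case the tail chunk is pushed first and the remainder second. A simplified standalone sketch of the same two-chunk drain follows; drain_ring() and the out() callback are illustrative names, not perf APIs.

#include <stddef.h>
#include <stdint.h>

/* Drain [start, end) from a power-of-two ring buffer of size mask + 1,
 * calling out() once or twice depending on whether the range wraps. */
static int drain_ring(unsigned char *data, uint64_t mask,
		      uint64_t start, uint64_t end,
		      int (*out)(void *buf, size_t size))
{
	uint64_t size = end - start;

	/* Range wraps past the end of the buffer: emit the tail first. */
	if ((start & mask) + size != (end & mask)) {
		size_t chunk = mask + 1 - (start & mask);

		if (out(&data[start & mask], chunk))
			return -1;
		start += chunk;
	}

	/* Remaining (or only) chunk is contiguous. */
	if (out(&data[start & mask], end - start))
		return -1;

	return 0;
}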
360 int mmap_cpu_mask__duplicate(struct mmap_cpu_mask *original, struct mmap_cpu_mask *clone)
362 clone->nbits = original->nbits;
363 clone->bits = bitmap_zalloc(original->nbits);
364 if (!clone->bits)
365 return -ENOMEM;
367 memcpy(clone->bits, original->bits, MMAP_CPU_MASK_BYTES(original));