Lines matching "free ... flowing" (free flowing counters) — excerpts from the powerpc In-Memory Collection (IMC) PMU driver, arch/powerpc/perf/imc-pmu.c.

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * In-Memory Collection (IMC) Performance Monitor counter support.
 */
#include <asm/imc-pmu.h>
/*
 * Used to avoid races in counting the nest-pmu units during hotplug
 * register and unregister.
 */

/* Global data structure used to avoid races between thread, core and trace-imc. */
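A condensed sketch of the pattern those two comments describe, using names that appear later in this listing (nest_init_lock, nest_pmus); nest_pmu_teardown() is a hypothetical wrapper around the fragments shown under imc_common_cpuhp_mem_free() below, not a function in the driver:

/* Sketch: serialize nest-pmu register/unregister against hotplug teardown. */
static DEFINE_MUTEX(nest_init_lock);	/* guards nest_pmus */
static int nest_pmus;			/* number of registered nest PMUs */

static void nest_pmu_teardown(void)	/* hypothetical helper name */
{
	mutex_lock(&nest_init_lock);
	if (nest_pmus == 1) {
		/* Last nest PMU going away: remove the hotplug state exactly once. */
		cpuhp_remove_state(CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE);
	}
	if (nest_pmus > 0)
		nest_pmus--;
	mutex_unlock(&nest_init_lock);
}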
/* in imc_event_to_pmu(): */
	return container_of(event->pmu, struct imc_pmu, pmu);
PMU_FORMAT_ATTR(event, "config:0-61");
PMU_FORMAT_ATTR(offset, "config:0-31");
PMU_FORMAT_ATTR(mode, "config:33-40");

/* Format attributes for imc trace-mode */
PMU_FORMAT_ATTR(cpmc_reserved, "config:0-19");
PMU_FORMAT_ATTR(cpmc_event, "config:20-27");
PMU_FORMAT_ATTR(cpmc_samplesel, "config:28-29");
PMU_FORMAT_ATTR(cpmc_load, "config:30-61");
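For orientation, a small self-contained userspace sketch of how these format strings are consumed: the event value published in sysfs is written into perf_event_attr.config, whose bits the PMU decodes per the attributes above. The type and config values here are hypothetical placeholders; the real ones come from /sys/bus/event_source/devices/<pmu>/:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = 10;		/* hypothetical: read from .../nest_*_imc/type */
	attr.config = 0x140;	/* hypothetical: an "event=0x140" counter offset */

	/* Nest events are per-chip, not per-task: pid = -1, a specific cpu. */
	int fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	uint64_t count;
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("count: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}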
/* in imc_pmu_cpumask_get_attr(): */
	switch (imc_pmu->domain) {
/* in device_str_attr_create(): */
	sysfs_attr_init(&attr->attr.attr);

	attr->event_str = str;
	attr->attr.attr.name = name;
	attr->attr.attr.mode = 0444;
	attr->attr.show = perf_event_sysfs_show;

	return &attr->attr.attr;
/* in imc_parse_event(): */
	event->value = base + reg;

	if (of_property_read_string(np, "event-name", &s))
		goto error;

	event->name = kasprintf(GFP_KERNEL, "%s%s", prefix, s);
	if (!event->name)
		goto error;
	/* ... */
	event->scale = kstrdup(s, GFP_KERNEL);
	if (!event->scale)
		goto error;
	/* ... */
	event->unit = kstrdup(s, GFP_KERNEL);
	if (!event->unit)
		goto error;
	/* ... */
error:
	kfree(event->unit);
	kfree(event->scale);
	kfree(event->name);
	return -EINVAL;
/* in update_events_in_group(): */
	if (of_property_read_string(node, "events-prefix", &prefix)) {
	/* ... */
	pmu->events = kcalloc(ct, sizeof(struct imc_events), GFP_KERNEL);
	if (!pmu->events) {
		/* ... */
		return -ENOMEM;
	}
	/* ... */
	ret = imc_parse_event(np, g_scale, g_unit, prefix, base_reg, &pmu->events[ct]);
	/* ... */
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	/* ... */
	/* "ct" has the total event structs added from the events-parent node. */
	/* ... */
		imc_free_events(pmu->events, ct);
		return -ENOMEM;
	/* ... */
	attr_group->name = "events";
	attr_group->attrs = attrs;
	/* ... */
	ev_val_str = kasprintf(GFP_KERNEL, "event=0x%x", pmu->events[i].value);
	/* ... */
	dev_str = device_str_attr_create(pmu->events[i].name, ev_val_str);
	/* ... */
	if (pmu->events[i].scale) {
		ev_scale_str = kasprintf(GFP_KERNEL, "%s.scale", pmu->events[i].name);
		/* ... */
		dev_str = device_str_attr_create(ev_scale_str, pmu->events[i].scale);
		/* ... */
	}

	if (pmu->events[i].unit) {
		ev_unit_str = kasprintf(GFP_KERNEL, "%s.unit", pmu->events[i].name);
		/* ... */
		dev_str = device_str_attr_create(ev_unit_str, pmu->events[i].unit);
		/* ... */
	}
	/* ... */
	pmu->attr_groups[IMC_EVENT_ATTR] = attr_group;
/* in nest_change_cpu_context(): */
		perf_pmu_migrate_context(&(*pn)->pmu, old_cpu, new_cpu);

/* in ppc_nest_imc_cpu_offline(): */
	int nid, target = -1;
	/* ... */
		return -EINVAL;

	ref->refc = 0;
/* in nest_imc_counters_release(): */
	if (event->cpu < 0)
		return;

	node_id = cpu_to_node(event->cpu);
	/* ... */
	ref = get_nest_pmu_ref(event->cpu);
	/* ... */
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * In the cpuhotplug offline path, the ppc_nest_imc_cpu_offline()
		 * function sets ref->refc to zero if the cpu going offline is
		 * the last cpu in a given node.
		 */
		spin_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_NEST,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("nest-imc: Unable to stop the counters for core %d\n", node_id);
			return;
		}
	} else if (ref->refc < 0) {
		WARN(1, "nest-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);
/* in nest_imc_event_init(): */
	u32 l_config, config = event->attr.config;
	/* ... */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;
	/* ... */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	/*
	 * Nest HW counter memory resides in a per-chip reserve-memory (HOMER).
	 * Get the base memory address for this cpu.
	 */
	chip_id = cpu_to_chip_id(event->cpu);
	/* ... */
		return -ENODEV;

	pcni = pmu->mem_info;
	do {
		if (pcni->id == chip_id) {
			/* ... */
		}
		/* ... */
	} while (pcni->vbase);
	/* ... */
		return -ENODEV;
	/* ... */
	event->hw.event_base = (u64)pcni->vbase + l_config;
	node_id = cpu_to_node(event->cpu);
	/* ... */
	ref = get_nest_pmu_ref(event->cpu);
	if (!ref)
		return -EINVAL;

	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("nest-imc: Unable to start the counters for node %d\n",
			       node_id);
			/* ... */
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);

	event->destroy = nest_imc_counters_release;
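Condensed sketch of the per-node refcounting visible in the two fragments above: the first nest event on a node starts the microcode engine via OPAL, and the last one (in the release path) stops it. nest_engine_get() is a hypothetical name for illustration; the OPAL wrapper and get_hard_smp_processor_id() are the real interfaces used above:

/* Sketch: first event on a node starts the nest engine; the release
 * path above mirrors this with opal_imc_counters_stop() on last put. */
static int nest_engine_get(struct imc_pmu_ref *ref, int cpu)
{
	int rc = 0;

	spin_lock(&ref->lock);
	if (ref->refc == 0)
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_NEST,
					     get_hard_smp_processor_id(cpu));
	if (!rc)
		++ref->refc;	/* only count events that actually started */
	spin_unlock(&ref->lock);
	return rc;
}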
/* in core_imc_mem_init(): */
	mem_info = &core_imc_pmu->mem_info[core_id];
	mem_info->id = core_id;
	/* ... */
	if (!page)
		return -ENOMEM;
	mem_info->vbase = page_address(page);
	/* ... */
	rc = opal_imc_counters_init(OPAL_IMC_COUNTERS_CORE,
				    __pa((void *)mem_info->vbase),
				    get_hard_smp_processor_id(cpu));
	if (rc) {
		free_pages((u64)mem_info->vbase, get_order(size));
		mem_info->vbase = NULL;
	}

/* in is_core_imc_mem_inited(): */
	mem_info = &core_imc_pmu->mem_info[core_id];
	if (!mem_info->vbase)
		return false;

/* in ppc_core_imc_cpu_online(): */
	ret = core_imc_mem_init(cpu, core_imc_pmu->counter_mem_size);
/* in ppc_core_imc_cpu_offline(): */
	if (!core_imc_pmu->pmu.event_init)
		return 0;
	/* ... */
	perf_pmu_migrate_context(&core_imc_pmu->pmu, cpu, ncpu);
	/* ... */
		return -EINVAL;

	ref->refc = 0;
	/*
	 * Reduce the global reference count, if this is the
	 * last cpu in this core and a core-imc event is running
	 * on this cpu.
	 */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_CORE)
		imc_global_refc.refc--;
	spin_unlock(&imc_global_refc.lock);

/* in reset_global_refc(): */
		imc_global_refc.refc--;
/* in core_imc_counters_release(): */
	if (event->cpu < 0)
		return;
	/* ... */
	core_id = event->cpu / threads_per_core;
	/* ... */
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		/*
		 * In the cpuhotplug offline path, the ppc_core_imc_cpu_offline()
		 * function sets ref->refc to zero if the cpu going offline is
		 * the last cpu in a given core.
		 */
		spin_unlock(&ref->lock);
		return;
	}
	ref->refc--;
	if (ref->refc == 0) {
		rc = opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
					    get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			/* ... */
		}
	} else if (ref->refc < 0) {
		WARN(1, "core-imc: Invalid event reference count\n");
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);
/* in core_imc_event_init(): */
	u64 config = event->attr.config;
	/* ... */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	event->hw.idx = -1;
	/* ... */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	if (!is_core_imc_mem_inited(event->cpu))
		return -ENODEV;

	core_id = event->cpu / threads_per_core;
	pcmi = &core_imc_pmu->mem_info[core_id];
	if (!pcmi->vbase)
		return -ENODEV;

	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;
	/* ... */
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		rc = opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
					     get_hard_smp_processor_id(event->cpu));
		if (rc) {
			spin_unlock(&ref->lock);
			pr_err("core-imc: Unable to start the counters for core %d\n",
			       core_id);
			/* ... */
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);

	/*
	 * Since the system can run either in accumulation or trace-mode
	 * of IMC at a time, core-imc events are allowed only if no other
	 * trace/thread imc events are enabled/monitored.
	 */
	/* ... */
		/*
		 * No other trace/thread imc events are running in
		 * the system, so set the refc.id to core-imc.
		 */
	/* ... */
		return -EBUSY;
	/* ... */
	event->hw.event_base = (u64)pcmi->vbase + (config & IMC_EVENT_OFFSET_MASK);
	event->destroy = core_imc_counters_release;
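The same global-exclusion handshake recurs in the thread-imc and trace-imc init paths below; condensed here into one hypothetical helper for clarity, using the imc_global_refc field names (lock, id, refc) seen throughout this listing:

/* Sketch: only one IMC mode (core/thread/trace) may own the hardware. */
static int imc_global_refc_try_get(int domain)
{
	int ret = 0;

	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == 0 || imc_global_refc.id == domain) {
		/* Free, or already owned by this domain: take a reference. */
		imc_global_refc.id = domain;
		imc_global_refc.refc++;
	} else {
		ret = -EBUSY;	/* another IMC mode is active */
	}
	spin_unlock(&imc_global_refc.lock);
	return ret;
}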
/*
 * The physical base address of the page allocated for a cpu is
 * written to the LDBAR for that cpu when the thread-imc event
 * is added.
 *
 * LDBAR Register Layout (64 bits): bit 0 enables/disables posting,
 * bit 1 selects the mode (accumulation vs. trace), a PB-scope field
 * follows, and bits 8:50 hold the counter (page) address.
 */
/* in thread_imc_mem_alloc(): */
		/*
		 * This case can happen only once, at start, since we don't
		 * free the memory in the cpu offline path.
		 */
		/* ... */
			return -ENOMEM;

/* in ppc_thread_imc_cpu_offline(): */
	/*
	 * For thread-imc, bit 0 of LDBAR is set to 1 in the event_add
	 * function; reset it here to stop updates to memory in the cpu
	 * offline path.
	 */
	/* ... */
	/* Reduce the refc if a thread-imc event is running on this cpu */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_THREAD)
		imc_global_refc.refc--;
	spin_unlock(&imc_global_refc.lock);
/* in thread_imc_event_init(): */
	u32 config = event->attr.config;
	/* ... */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Sampling not supported */
	if (event->hw.sample_period)
		return -EINVAL;

	event->hw.idx = -1;
	/* ... */
	if ((config & IMC_EVENT_OFFSET_MASK) > pmu->counter_mem_size)
		return -EINVAL;

	target = event->hw.target;
	if (!target)
		return -EINVAL;
	/* ... */
	/*
	 * Check if any other trace/core imc events are running in the
	 * system; if not, set the global id to thread-imc.
	 */
	/* ... */
		return -EBUSY;
	/* ... */
	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
/* in is_thread_imc_pmu(): */
	if (!strncmp(event->pmu->name, "thread_imc", strlen("thread_imc")))
		return true;

/* in get_event_base_addr(): */
		return (__be64 *)(addr + (event->attr.config & IMC_EVENT_OFFSET_MASK));
	/* ... */
	return (__be64 *)event->hw.event_base;

/* in imc_read_counter(): */
	/*
	 * In-Memory Collection (IMC) counters are free flowing counters.
	 * So take a snapshot of the counter value on enable and save it
	 * to calculate the delta at a later point in time.
	 */
	/* ... */
	local64_set(&event->hw.prev_count, data);

/* in imc_event_update(): */
	counter_prev = local64_read(&event->hw.prev_count);
	/* ... */
	final_count = counter_new - counter_prev;
	/* ... */
	local64_add(final_count, &event->count);

/* in imc_event_start(): */
	/*
	 * In Memory Counters are free flowing counters. HW or the microcode
	 * keeps adding to the counter whenever an event is enabled in the mesh.
	 * Take a snapshot of the counter value on event enable.
	 */
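The fragments above implement a snapshot-and-delta scheme: because the hardware counter never stops, the driver records its value at enable time and later reports only the growth since that snapshot. A minimal self-contained illustration in plain C (not driver code; the driver uses local64_t and big-endian loads, and the names below are invented for this sketch):

#include <stdint.h>

struct sketch_event {
	uint64_t prev_count;	/* snapshot taken when the event starts */
	uint64_t count;		/* accumulated delta reported to perf */
};

static void sketch_start(struct sketch_event *ev, const uint64_t *hw)
{
	/* Counter keeps running; just remember where it was. */
	ev->prev_count = *hw;
}

static void sketch_update(struct sketch_event *ev, const uint64_t *hw)
{
	uint64_t now = *hw;

	ev->count += now - ev->prev_count;	/* delta since last snapshot */
	ev->prev_count = now;
}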
/* in thread_imc_event_add(): */
	if (!is_core_imc_mem_inited(smp_processor_id()))
		return -EINVAL;
	/* ... */
	ref = &core_imc_refc[core_id];
	if (!ref)
		return -EINVAL;

	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("thread-imc: Unable to start the counter for core %d\n",
			       core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);

/* in thread_imc_event_del(): */
	spin_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_CORE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("thread-imc: Unable to stop the counters for core %d\n",
			       core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);
/* in trace_imc_mem_alloc(): */
		/* ... */
			return -ENOMEM;

/* in ppc_trace_imc_cpu_offline(): */
	/*
	 * No need to set bit 0 of LDBAR to zero, as
	 * it is set to zero for imc trace-mode.
	 *
	 * Reduce the refc if any trace-imc event is running
	 * on this cpu.
	 */
	spin_lock(&imc_global_refc.lock);
	if (imc_global_refc.id == IMC_DOMAIN_TRACE)
		imc_global_refc.refc--;
	spin_unlock(&imc_global_refc.lock);
/*
 * Function to parse trace-imc data obtained and to prepare the perf sample.
 */
/* in trace_imc_prepare_sample(): */
	/* Sanity checks for a valid record */
	if (be64_to_cpu(READ_ONCE(mem->tb1)) > *prev_tb)
		*prev_tb = be64_to_cpu(READ_ONCE(mem->tb1));
	else
		return -EINVAL;

	if ((be64_to_cpu(READ_ONCE(mem->tb1)) & IMC_TRACE_RECORD_TB1_MASK) !=
	    be64_to_cpu(READ_ONCE(mem->tb2)))
		return -EINVAL;

	/* Prepare perf sample */
	data->ip = be64_to_cpu(READ_ONCE(mem->ip));
	data->period = event->hw.last_period;

	header->type = PERF_RECORD_SAMPLE;
	header->size = sizeof(*header) + event->header_size;
	header->misc = 0;
	/* ... */
	switch (IMC_TRACE_RECORD_VAL_HVPR(be64_to_cpu(READ_ONCE(mem->val)))) {
	case 0: /* MSR HV and PR not set in the trace-record */
		header->misc |= PERF_RECORD_MISC_GUEST_KERNEL;
		break;
	case 1: /* MSR HV is 0 and PR is 1 */
		header->misc |= PERF_RECORD_MISC_GUEST_USER;
		break;
	case 2: /* MSR HV is 1 and PR is 0 */
		header->misc |= PERF_RECORD_MISC_KERNEL;
		break;
	case 3: /* MSR HV is 1 and PR is 1 */
		header->misc |= PERF_RECORD_MISC_USER;
		break;
	}
	/* ... */
	if (is_kernel_addr(data->ip))
		header->misc |= PERF_RECORD_MISC_KERNEL;
	else
		header->misc |= PERF_RECORD_MISC_USER;
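The record this parser walks is a 64-byte structure; the layout below follows struct trace_imc_data in asm/imc-pmu.h as I recall it, so treat the exact field set as an assumption. The two timebase fields bracket the record: a sample is accepted only if tb1 advanced past the previously seen timebase and tb1's masked bits equal tb2, which proves the 64 bytes were not torn mid-write by the microcode:

/* Assumed layout of one 64-byte trace-imc record (big-endian fields). */
struct trace_imc_data {
	u64 tb1;	/* timebase at the start of the record */
	u64 ip;		/* sampled instruction pointer */
	u64 val;	/* carries the MSR HV/PR bits, among others */
	u64 cpmc1;
	u64 cpmc2;
	u64 cpmc3;
	u64 cpmc4;
	u64 tb2;	/* timebase again; must match tb1's masked bits */
};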
/* in trace_imc_event_add(): */
	/* Set the trace-imc bit in ldbar and load ldbar with the per-thread memory address */
	local_mem = get_trace_imc_event_base_addr();
	ldbar_value = ((u64)local_mem & THREAD_IMC_LDBAR_MASK) | TRACE_IMC_ENABLE;

	/* trace-imc reference count */
	if (trace_imc_refc)
		ref = &trace_imc_refc[core_id];
	if (!ref) {
		pr_debug("imc: Failed to get the event reference count\n");
		return -EINVAL;
	}

	mtspr(SPRN_LDBAR, ldbar_value);
	spin_lock(&ref->lock);
	if (ref->refc == 0) {
		if (opal_imc_counters_start(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("trace-imc: Unable to start the counters for core %d\n", core_id);
			return -EINVAL;
		}
	}
	++ref->refc;
	spin_unlock(&ref->lock);

/* in trace_imc_event_del(): */
	spin_lock(&ref->lock);
	ref->refc--;
	if (ref->refc == 0) {
		if (opal_imc_counters_stop(OPAL_IMC_COUNTERS_TRACE,
		    get_hard_smp_processor_id(smp_processor_id()))) {
			spin_unlock(&ref->lock);
			pr_err("trace-imc: Unable to stop the counters for core %d\n", core_id);
			return;
		}
	} else if (ref->refc < 0) {
		ref->refc = 0;
	}
	spin_unlock(&ref->lock);
/* in trace_imc_event_init(): */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (!perfmon_capable())
		return -EACCES;

	/* Return if this is a counting event */
	if (event->attr.sample_period == 0)
		return -ENOENT;
	/* ... */
		/*
		 * No core/thread imc events are running in the
		 * system, so set the refc.id to trace-imc.
		 */
	/* ... */
		return -EBUSY;
	/* ... */
	event->hw.idx = -1;
	/* ... */
	event->pmu->task_ctx_nr = perf_sw_context;
	event->destroy = reset_global_refc;
/* in update_pmu_ops(): */
	pmu->pmu.task_ctx_nr = perf_invalid_context;
	pmu->pmu.add = imc_event_add;
	pmu->pmu.del = imc_event_stop;
	pmu->pmu.start = imc_event_start;
	pmu->pmu.stop = imc_event_stop;
	pmu->pmu.read = imc_event_update;
	pmu->pmu.attr_groups = pmu->attr_groups;
	pmu->pmu.capabilities = PERF_PMU_CAP_NO_EXCLUDE;
	pmu->attr_groups[IMC_FORMAT_ATTR] = &imc_format_group;

	switch (pmu->domain) {
	case IMC_DOMAIN_NEST:
		pmu->pmu.event_init = nest_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_CORE:
		pmu->pmu.event_init = core_imc_event_init;
		pmu->attr_groups[IMC_CPUMASK_ATTR] = &imc_pmu_cpumask_attr_group;
		break;
	case IMC_DOMAIN_THREAD:
		pmu->pmu.event_init = thread_imc_event_init;
		pmu->pmu.add = thread_imc_event_add;
		pmu->pmu.del = thread_imc_event_del;
		pmu->pmu.start_txn = thread_imc_pmu_start_txn;
		pmu->pmu.cancel_txn = thread_imc_pmu_cancel_txn;
		pmu->pmu.commit_txn = thread_imc_pmu_commit_txn;
		break;
	case IMC_DOMAIN_TRACE:
		pmu->pmu.event_init = trace_imc_event_init;
		pmu->pmu.add = trace_imc_event_add;
		pmu->pmu.del = trace_imc_event_del;
		pmu->pmu.start = trace_imc_event_start;
		pmu->pmu.stop = trace_imc_event_stop;
		pmu->pmu.read = trace_imc_event_read;
		pmu->attr_groups[IMC_FORMAT_ATTR] = &trace_imc_format_group;
		break;
	}
/* in init_nest_pmu_ref(): */
	/* ... */
		return -ENOMEM;

/* in cleanup_all_core_imc_memory(): */
	struct imc_mem_info *ptr = core_imc_pmu->mem_info;
	int size = core_imc_pmu->counter_mem_size;

/* in thread_imc_ldbar_disable(): */
	/*
	 * By setting the 0th bit of LDBAR to zero, we disable thread-imc
	 * updates to memory.
	 */
/* Function to free the attr_groups which are dynamically allocated */
/* in imc_common_mem_free(): */
	if (pmu_ptr->attr_groups[IMC_EVENT_ATTR])
		kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]->attrs);
	kfree(pmu_ptr->attr_groups[IMC_EVENT_ATTR]);

/*
 * Common function to unregister the cpu hotplug callback and
 * free the memory.
 */
/* in imc_common_cpuhp_mem_free(): */
	if (pmu_ptr->domain == IMC_DOMAIN_NEST) {
		/* ... */
			nest_pmus--;
		/* ... */
	}

	/* Free core_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_CORE) {
		/* ... */
	}

	/* Free thread_imc memory */
	if (pmu_ptr->domain == IMC_DOMAIN_THREAD) {
		/* ... */
	}

	if (pmu_ptr->domain == IMC_DOMAIN_TRACE) {
		/* ... */
	}

/*
 * Function to unregister thread-imc if core-imc
 * is not registered.
 */
/* in unregister_thread_imc(): */
	perf_pmu_unregister(&thread_imc_pmu->pmu);
/* in imc_mem_init(): */
	int nr_cores, cpu, res = -ENOMEM;

	if (of_property_read_string(parent, "name", &s))
		return -ENODEV;

	switch (pmu_ptr->domain) {
	case IMC_DOMAIN_NEST:
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s_imc", "nest_", s);
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;
		/* ... */
	case IMC_DOMAIN_CORE:
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;
		/* ... */
		pmu_ptr->mem_info = kcalloc(nr_cores, sizeof(struct imc_mem_info),
					    GFP_KERNEL);
		if (!pmu_ptr->mem_info)
			return -ENOMEM;
		/* ... */
			kfree(pmu_ptr->mem_info);
		/* ... */
	case IMC_DOMAIN_THREAD:
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;

		thread_imc_mem_size = pmu_ptr->counter_mem_size;
		for_each_online_cpu(cpu) {
			res = thread_imc_mem_alloc(cpu, pmu_ptr->counter_mem_size);
			/* ... */
		}
		/* ... */
	case IMC_DOMAIN_TRACE:
		pmu_ptr->pmu.name = kasprintf(GFP_KERNEL, "%s%s", s, "_imc");
		if (!pmu_ptr->pmu.name)
			return -ENOMEM;
		/* ... */
		if (!trace_imc_refc)
			return -ENOMEM;

		trace_imc_mem_size = pmu_ptr->counter_mem_size;
		/* ... */
	default:
		return -EINVAL;
	}
/* in init_imc_pmu(): */
	switch (pmu_ptr->domain) {
	/* ... */
	default:
		return -EINVAL;	/* Unknown domain */
	}
	/* ... */
	ret = perf_pmu_register(&pmu_ptr->pmu, pmu_ptr->pmu.name, -1);
	/* ... */
	pr_debug("%s performance monitor hardware support registered\n",
		 pmu_ptr->pmu.name);