Lines Matching +full:cci +full:- +full:control +full:- +full:port

1 // SPDX-License-Identifier: GPL-2.0
2 // CCI Cache Coherent Interconnect PMU driver
3 // Copyright (C) 2013-2018 Arm Ltd.
6 #include <linux/arm-cci.h>
16 #define DRIVER_NAME "ARM-CCI PMU"
35 #define CCI_PMU_CNTR_SIZE(model) ((model)->cntr_size)
37 #define CCI_PMU_CNTR_MASK ((1ULL << 32) - 1)
38 #define CCI_PMU_CNTR_LAST(cci_pmu) (cci_pmu->num_cntrs - 1)
41 ((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)
77 * @fixed_hw_cntrs - Number of fixed event counters
78 * @num_hw_cntrs - Maximum number of programmable event counters
79 * @cntr_size - Size of an event counter mapping
147 /* Port ids */
160 * Instead of an event id to monitor CCI cycles, a dedicated counter is
161 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
172 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
174 * ports (the event source) and bits 4:0 for the event code; a different set of event codes is associated with each port type.
176 * Additionally, the range of events associated with the port types changed between the r0 and r1 revisions.
179 * The constants below define the range of valid codes for each port type for each revision, and are used to validate the event to be counted.
212 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
213 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
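
As a worked illustration of the layout described above, here is a minimal userspace sketch that splits a CCI-400 perf config value into its source port (config:5-7) and event code (config:0-4); the macro and function names are illustrative, not taken from the driver.

/* Illustrative only: decode a CCI-400 "config" value per the field layout
 * above (event = config:0-4, source = config:5-7). Names are not from the
 * driver. */
#include <stdio.h>

#define EX_CCI400_EVENT_MASK   0x1fUL	/* bits [4:0] */
#define EX_CCI400_SOURCE_SHIFT 5
#define EX_CCI400_SOURCE_MASK  0x7UL	/* bits [7:5] */

static void cci400_decode_config(unsigned long config)
{
	unsigned long event  = config & EX_CCI400_EVENT_MASK;
	unsigned long source = (config >> EX_CCI400_SOURCE_SHIFT) & EX_CCI400_SOURCE_MASK;

	printf("config=0x%lx -> source port=0x%lx, event code=0x%lx\n",
	       config, source, event);
}

int main(void)
{
	cci400_decode_config(0x63);	/* source 0x3, event 0x3 */
	return 0;
}
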
304 return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var); in cci400_pmu_cycle_event_show()
315 if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask)) in cci400_get_event_idx()
316 return -EAGAIN; in cci400_get_event_idx()
322 if (!test_and_set_bit(idx, hw->used_mask)) in cci400_get_event_idx()
326 return -EAGAIN; in cci400_get_event_idx()
336 return -ENOENT; in cci400_validate_hw_event()
357 return -ENOENT; in cci400_validate_hw_event()
360 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci400_validate_hw_event()
361 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci400_validate_hw_event()
364 return -ENOENT; in cci400_validate_hw_event()
370 rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK; in probe_cci400_revision()
395 * CCI5xx PMU event id is a 9-bit value made of two parts.
396 * bits [8:5] - Source for the event
397 * bits [4:0] - Event code (specific to type of interface)
402 /* Port ids */
448 CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
449 CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
524 (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL); in cci5xx_pmu_global_event_show()
531 * 0x0-0x6 - Slave interfaces
532 * 0x8-0xD - Master interfaces
533 * 0xf - Global Events
534 * 0x7,0xe - Reserved
544 return -ENOENT; in cci500_validate_hw_event()
568 return -ENOENT; in cci500_validate_hw_event()
571 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci500_validate_hw_event()
572 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci500_validate_hw_event()
575 return -ENOENT; in cci500_validate_hw_event()
582 * 0x0-0x6 - Slave interfaces
583 * 0x8-0xe - Master interfaces
584 * 0xf - Global Events
585 * 0x7 - Reserved
595 return -ENOENT; in cci550_validate_hw_event()
620 return -ENOENT; in cci550_validate_hw_event()
623 if (ev_code >= cci_pmu->model->event_ranges[if_type].min && in cci550_validate_hw_event()
624 ev_code <= cci_pmu->model->event_ranges[if_type].max) in cci550_validate_hw_event()
627 return -ENOENT; in cci550_validate_hw_event()
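
To make the two range tables above concrete, here is a small standalone sketch that classifies a CCI-5xx source field (bits [8:5] of the event config) using the CCI-550 ranges; on CCI-500 the only difference is that 0xE is reserved rather than a master interface. The names are illustrative, not the driver's.

/* Illustrative only: classify a CCI-5xx source port id into an interface
 * type, following the CCI-550 ranges listed above. */
#include <stdio.h>

enum ex_if_type { EX_IF_SLAVE, EX_IF_MASTER, EX_IF_GLOBAL, EX_IF_RESERVED };

static enum ex_if_type cci550_source_type(unsigned int source)
{
	if (source <= 0x6)
		return EX_IF_SLAVE;	/* 0x0-0x6: slave interfaces */
	if (source >= 0x8 && source <= 0xe)
		return EX_IF_MASTER;	/* 0x8-0xE: master interfaces */
	if (source == 0xf)
		return EX_IF_GLOBAL;	/* 0xF: global events */
	return EX_IF_RESERVED;		/* 0x7: reserved */
}

int main(void)
{
	unsigned int config = 0x1e5;			/* 9-bit event id */
	unsigned int source = (config >> 5) & 0xf;	/* bits [8:5] */
	unsigned int event  = config & 0x1f;		/* bits [4:0] */

	printf("source=0x%x event=0x%x type=%d\n",
	       source, event, cci550_source_type(source));
	return 0;
}
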
633 * Program the CCI PMU counters which have PERF_HES_ARCH set
640 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; in cci_pmu_sync_counters()
644 for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) { in cci_pmu_sync_counters()
645 struct perf_event *event = cci_hw->events[i]; in cci_pmu_sync_counters()
651 if (event->hw.state & PERF_HES_STOPPED) in cci_pmu_sync_counters()
653 if (event->hw.state & PERF_HES_ARCH) { in cci_pmu_sync_counters()
655 event->hw.state &= ~PERF_HES_ARCH; in cci_pmu_sync_counters()
662 /* Should be called with cci_pmu->hw_events->pmu_lock held */
668 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN; in __cci_pmu_enable_nosync()
669 writel(val, cci_pmu->ctrl_base + CCI_PMCR); in __cci_pmu_enable_nosync()
672 /* Should be called with cci_pmu->hw_events->pmu_lock held */
679 /* Should be called with cci_pmu->hw_events->pmu_lock held */
685 val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN; in __cci_pmu_disable()
686 writel(val, cci_pmu->ctrl_base + CCI_PMCR); in __cci_pmu_disable()
696 (unsigned long)eattr->var); in cci_pmu_event_show()
706 return readl_relaxed(cci_pmu->base + in pmu_read_register()
707 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_read_register()
713 writel_relaxed(value, cci_pmu->base + in pmu_write_register()
714 CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset); in pmu_write_register()
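
pmu_read_register()/pmu_write_register() address a per-counter window below cci_pmu->base. Below is a minimal sketch of how that offset is formed, assuming CCI_PMU_CNTR_BASE(model, idx) expands to idx * cntr_size; the macro body is outside this excerpt, so treat the stride and values as assumptions.

/* Illustrative only: per-counter register offset, assuming a layout of
 * idx * cntr_size below cci_pmu->base. */
#include <stdio.h>

static unsigned long ex_cntr_reg(unsigned long cntr_size, int idx,
				 unsigned long reg_offset)
{
	return (unsigned long)idx * cntr_size + reg_offset;
}

int main(void)
{
	/* e.g. counter 2, register at +0x4, with an assumed 64K stride */
	printf("offset=0x%lx\n", ex_cntr_reg(0x10000UL, 2, 0x4));
	return 0;
}
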
739 * For all counters on the CCI-PMU, disable any 'enabled' counters,
746 * cci_pmu->hw_events->pmu_lock).
755 for (i = 0; i < cci_pmu->num_cntrs; i++) { in pmu_save_counters()
772 for_each_set_bit(i, mask, cci_pmu->num_cntrs) in pmu_restore_counters()
778 * by the cci
782 return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & in pmu_get_max_counters()
788 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_get_event_idx()
789 unsigned long cci_event = event->hw.config_base; in pmu_get_event_idx()
792 if (cci_pmu->model->get_event_idx) in pmu_get_event_idx()
793 return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event); in pmu_get_event_idx()
797 if (!test_and_set_bit(idx, hw->used_mask)) in pmu_get_event_idx()
801 return -EAGAIN; in pmu_get_event_idx()
806 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_map_event()
808 if (event->attr.type < PERF_TYPE_MAX || in pmu_map_event()
809 !cci_pmu->model->validate_hw_event) in pmu_map_event()
810 return -ENOENT; in pmu_map_event()
812 return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config); in pmu_map_event()
818 struct platform_device *pmu_device = cci_pmu->plat_device; in pmu_request_irq()
821 return -ENODEV; in pmu_request_irq()
823 if (cci_pmu->nr_irqs < 1) { in pmu_request_irq()
824 dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n"); in pmu_request_irq()
825 return -ENODEV; in pmu_request_irq()
829 * Register all available CCI PMU interrupts. In the interrupt handler in pmu_request_irq()
833 * This should allow handling of non-unique interrupts for the counters. in pmu_request_irq()
835 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_request_irq()
836 int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED, in pmu_request_irq()
837 "arm-cci-pmu", cci_pmu); in pmu_request_irq()
839 dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n", in pmu_request_irq()
840 cci_pmu->irqs[i]); in pmu_request_irq()
844 set_bit(i, &cci_pmu->active_irqs); in pmu_request_irq()
854 for (i = 0; i < cci_pmu->nr_irqs; i++) { in pmu_free_irq()
855 if (!test_and_clear_bit(i, &cci_pmu->active_irqs)) in pmu_free_irq()
858 free_irq(cci_pmu->irqs[i], cci_pmu); in pmu_free_irq()
864 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in pmu_read_counter()
865 struct hw_perf_event *hw_counter = &event->hw; in pmu_read_counter()
866 int idx = hw_counter->idx; in pmu_read_counter()
870 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in pmu_read_counter()
886 struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events; in __pmu_write_counters()
888 for_each_set_bit(i, mask, cci_pmu->num_cntrs) { in __pmu_write_counters()
889 struct perf_event *event = cci_hw->events[i]; in __pmu_write_counters()
893 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); in __pmu_write_counters()
899 if (cci_pmu->model->write_counters) in pmu_write_counters()
900 cci_pmu->model->write_counters(cci_pmu, mask); in pmu_write_counters()
908 * CCI-500/CCI-550 has advanced power saving policies, which could gate the
919 * For each counter to be programmed, repeat steps 3-7:
921 * 3) Write an invalid event code to the event control register for the
923 * 4) Enable the counter control for the counter.
931 * We choose an event which for CCI-5xx is guaranteed not to count.
941 bitmap_zero(saved_mask, cci_pmu->num_cntrs); in cci5xx_pmu_write_counters()
950 for_each_set_bit(i, mask, cci_pmu->num_cntrs) { in cci5xx_pmu_write_counters()
951 struct perf_event *event = cci_pmu->hw_events.events[i]; in cci5xx_pmu_write_counters()
958 pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i); in cci5xx_pmu_write_counters()
960 pmu_set_event(cci_pmu, i, event->hw.config_base); in cci5xx_pmu_write_counters()
972 struct hw_perf_event *hwc = &event->hw; in pmu_event_update()
976 prev_raw_count = local64_read(&hwc->prev_count); in pmu_event_update()
978 } while (local64_cmpxchg(&hwc->prev_count, prev_raw_count, in pmu_event_update()
981 delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK; in pmu_event_update()
983 local64_add(delta, &event->count); in pmu_event_update()
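
The delta above is taken modulo 2^32 via CCI_PMU_CNTR_MASK, so a hardware counter that wrapped since prev_count was recorded still produces the correct increment. A standalone sketch of that arithmetic:

/* Illustrative only: wraparound-safe delta as used by pmu_event_update(). */
#include <stdint.h>
#include <stdio.h>

#define EX_CNTR_MASK ((1ULL << 32) - 1)

int main(void)
{
	uint64_t prev_raw = 0xfffffff0ULL;	/* near the 32-bit limit */
	uint64_t new_raw  = 0x00000010ULL;	/* counter wrapped past zero */
	uint64_t delta = (new_raw - prev_raw) & EX_CNTR_MASK;

	printf("delta = %llu\n", (unsigned long long)delta);	/* prints 32 */
	return 0;
}
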
995 struct hw_perf_event *hwc = &event->hw; in pmu_event_set_period()
997 * The CCI PMU counters have a period of 2^32. To account for the in pmu_event_set_period()
1003 local64_set(&hwc->prev_count, val); in pmu_event_set_period()
1006 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose in pmu_event_set_period()
1007 * values need to be sync-ed with the s/w state before the PMU is in pmu_event_set_period()
1011 hwc->state |= PERF_HES_ARCH; in pmu_event_set_period()
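
Per the comment above, the counters have a 2^32 period, so the driver starts them mid-range to leave roughly 2^31 events of headroom before the overflow interrupt must be serviced; the exact start value is outside this excerpt, so the figure in the sketch below is an assumption. PERF_HES_ARCH then marks the counter so cci_pmu_sync_counters() writes the saved value to hardware when the PMU is next enabled.

/* Illustrative only: a mid-range start value for a 32-bit counter and the
 * resulting headroom before overflow (assumed, not quoted from the driver). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t period   = 1ULL << 32;		/* counter period */
	uint64_t start    = 1ULL << 31;		/* assumed mid-range start */
	uint64_t headroom = period - start;	/* events until overflow IRQ */

	printf("start=0x%llx headroom=%llu\n",
	       (unsigned long long)start, (unsigned long long)headroom);
	return 0;
}
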
1017 struct cci_pmu_hw_events *events = &cci_pmu->hw_events; in pmu_handle_irq()
1020 raw_spin_lock(&events->pmu_lock); in pmu_handle_irq()
1026 * This should work regardless of whether we have per-counter overflow in pmu_handle_irq()
1030 struct perf_event *event = events->events[idx]; in pmu_handle_irq()
1050 raw_spin_unlock(&events->pmu_lock); in pmu_handle_irq()
1072 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in hw_perf_event_destroy()
1073 atomic_t *active_events = &cci_pmu->active_events; in hw_perf_event_destroy()
1074 struct mutex *reserve_mutex = &cci_pmu->reserve_mutex; in hw_perf_event_destroy()
1085 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_enable()
1086 bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs); in cci_pmu_enable()
1092 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); in cci_pmu_enable()
1094 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); in cci_pmu_enable()
1101 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_disable()
1104 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); in cci_pmu_disable()
1106 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); in cci_pmu_disable()
1110 * Check if the idx represents a non-programmable counter.
1116 return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs); in pmu_fixed_hw_idx()
1121 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_start()
1122 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_start()
1123 struct hw_perf_event *hwc = &event->hw; in cci_pmu_start()
1124 int idx = hwc->idx; in cci_pmu_start()
1132 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); in cci_pmu_start()
1134 hwc->state = 0; in cci_pmu_start()
1137 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_start()
1141 raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); in cci_pmu_start()
1145 pmu_set_event(cci_pmu, idx, hwc->config_base); in cci_pmu_start()
1150 raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); in cci_pmu_start()
1155 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_stop()
1156 struct hw_perf_event *hwc = &event->hw; in cci_pmu_stop()
1157 int idx = hwc->idx; in cci_pmu_stop()
1159 if (hwc->state & PERF_HES_STOPPED) in cci_pmu_stop()
1163 dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); in cci_pmu_stop()
1173 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in cci_pmu_stop()
1178 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_add()
1179 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_add()
1180 struct hw_perf_event *hwc = &event->hw; in cci_pmu_add()
1188 event->hw.idx = idx; in cci_pmu_add()
1189 hw_events->events[idx] = event; in cci_pmu_add()
1191 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in cci_pmu_add()
1203 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_del()
1204 struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; in cci_pmu_del()
1205 struct hw_perf_event *hwc = &event->hw; in cci_pmu_del()
1206 int idx = hwc->idx; in cci_pmu_del()
1209 hw_events->events[idx] = NULL; in cci_pmu_del()
1210 clear_bit(idx, hw_events->used_mask); in cci_pmu_del()
1223 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The in validate_event()
1224 * core perf code won't check that the pmu->ctx == leader->ctx in validate_event()
1225 * until after pmu->event_init(event). in validate_event()
1227 if (event->pmu != cci_pmu) in validate_event()
1230 if (event->state < PERF_EVENT_STATE_OFF) in validate_event()
1233 if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec) in validate_event()
1241 struct perf_event *sibling, *leader = event->group_leader; in validate_group()
1242 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in validate_group()
1251 bitmap_zero(mask, cci_pmu->num_cntrs); in validate_group()
1253 if (!validate_event(event->pmu, &fake_pmu, leader)) in validate_group()
1254 return -EINVAL; in validate_group()
1257 if (!validate_event(event->pmu, &fake_pmu, sibling)) in validate_group()
1258 return -EINVAL; in validate_group()
1261 if (!validate_event(event->pmu, &fake_pmu, event)) in validate_group()
1262 return -EINVAL; in validate_group()
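
The validation above schedules the whole group against scratch "fake" PMU state rather than real hardware. A self-contained sketch of the same idea, with an illustrative counter count and helper names:

/* Illustrative only: group validation against a scratch counter bitmask.
 * Allocate an index for the leader, every sibling and the new event; if any
 * allocation fails, the group cannot be scheduled. */
#include <stdio.h>

#define EX_NUM_CNTRS 5	/* e.g. 4 programmable + 1 fixed; illustrative */

static int ex_fake_get_idx(unsigned int *used_mask)
{
	for (int i = 0; i < EX_NUM_CNTRS; i++) {
		if (!(*used_mask & (1u << i))) {
			*used_mask |= 1u << i;
			return i;
		}
	}
	return -1;	/* -EAGAIN in the driver */
}

int main(void)
{
	unsigned int used = 0;
	int group_size = 6;	/* leader + 5 siblings: one too many */

	for (int n = 0; n < group_size; n++) {
		if (ex_fake_get_idx(&used) < 0) {
			printf("group of %d events cannot be scheduled\n", group_size);
			return 1;
		}
	}
	printf("group fits\n");
	return 0;
}
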
1269 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
1275 pr_debug("event %x:%llx not supported\n", event->attr.type, in __hw_perf_event_init()
1276 event->attr.config); in __hw_perf_event_init()
1282 * hardware. Use -1 to signify that we haven't decided where to put it in __hw_perf_event_init()
1285 hwc->idx = -1; in __hw_perf_event_init()
1286 hwc->config_base = 0; in __hw_perf_event_init()
1287 hwc->config = 0; in __hw_perf_event_init()
1288 hwc->event_base = 0; in __hw_perf_event_init()
1293 hwc->config_base |= (unsigned long)mapping; in __hw_perf_event_init()
1295 if (event->group_leader != event) { in __hw_perf_event_init()
1297 return -EINVAL; in __hw_perf_event_init()
1305 struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); in cci_pmu_event_init()
1306 atomic_t *active_events = &cci_pmu->active_events; in cci_pmu_event_init()
1309 if (event->attr.type != event->pmu->type) in cci_pmu_event_init()
1310 return -ENOENT; in cci_pmu_event_init()
1313 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in cci_pmu_event_init()
1314 return -EOPNOTSUPP; in cci_pmu_event_init()
1319 * handle cpu == -1 and pid == -1 for this case. in cci_pmu_event_init()
1325 if (event->cpu < 0) in cci_pmu_event_init()
1326 return -EINVAL; in cci_pmu_event_init()
1327 event->cpu = cci_pmu->cpu; in cci_pmu_event_init()
1329 event->destroy = hw_perf_event_destroy; in cci_pmu_event_init()
1331 mutex_lock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1336 mutex_unlock(&cci_pmu->reserve_mutex); in cci_pmu_event_init()
1354 return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu)); in pmu_cpumask_attr_show()
1388 const struct cci_pmu_model *model = cci_pmu->model; in cci_pmu_init()
1389 char *name = model->name; in cci_pmu_init()
1392 if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX)) in cci_pmu_init()
1393 return -EINVAL; in cci_pmu_init()
1394 if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX)) in cci_pmu_init()
1395 return -EINVAL; in cci_pmu_init()
1397 pmu_event_attr_group.attrs = model->event_attrs; in cci_pmu_init()
1398 pmu_format_attr_group.attrs = model->format_attrs; in cci_pmu_init()
1400 cci_pmu->pmu = (struct pmu) { in cci_pmu_init()
1402 .parent = &pdev->dev, in cci_pmu_init()
1403 .name = cci_pmu->model->name, in cci_pmu_init()
1417 cci_pmu->plat_device = pdev; in cci_pmu_init()
1419 if (num_cntrs > cci_pmu->model->num_hw_cntrs) { in cci_pmu_init()
1420 dev_warn(&pdev->dev, in cci_pmu_init()
1423 num_cntrs, cci_pmu->model->num_hw_cntrs); in cci_pmu_init()
1424 num_cntrs = cci_pmu->model->num_hw_cntrs; in cci_pmu_init()
1426 cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs; in cci_pmu_init()
1428 return perf_pmu_register(&cci_pmu->pmu, name, -1); in cci_pmu_init()
1435 if (!g_cci_pmu || cpu != g_cci_pmu->cpu) in cci_pmu_offline_cpu()
1442 perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target); in cci_pmu_offline_cpu()
1443 g_cci_pmu->cpu = target; in cci_pmu_offline_cpu()
1545 .compatible = "arm,cci-400-pmu",
1549 .compatible = "arm,cci-400-pmu,r0",
1553 .compatible = "arm,cci-400-pmu,r1",
1559 .compatible = "arm,cci-500-pmu,r0",
1563 .compatible = "arm,cci-550-pmu,r0",
1594 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1596 cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data; in cci_pmu_alloc()
1601 "DEPRECATED compatible property, requires secure access to CCI registers"); in cci_pmu_alloc()
1605 dev_warn(dev, "CCI PMU version not supported\n"); in cci_pmu_alloc()
1606 return ERR_PTR(-ENODEV); in cci_pmu_alloc()
1609 cci_pmu->model = model; in cci_pmu_alloc()
1610 cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), in cci_pmu_alloc()
1611 sizeof(*cci_pmu->irqs), GFP_KERNEL); in cci_pmu_alloc()
1612 if (!cci_pmu->irqs) in cci_pmu_alloc()
1613 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1614 cci_pmu->hw_events.events = devm_kcalloc(dev, in cci_pmu_alloc()
1616 sizeof(*cci_pmu->hw_events.events), in cci_pmu_alloc()
1618 if (!cci_pmu->hw_events.events) in cci_pmu_alloc()
1619 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1620 cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev, in cci_pmu_alloc()
1623 if (!cci_pmu->hw_events.used_mask) in cci_pmu_alloc()
1624 return ERR_PTR(-ENOMEM); in cci_pmu_alloc()
1634 cci_pmu = cci_pmu_alloc(&pdev->dev); in cci_pmu_probe()
1638 cci_pmu->base = devm_platform_ioremap_resource(pdev, 0); in cci_pmu_probe()
1639 if (IS_ERR(cci_pmu->base)) in cci_pmu_probe()
1640 return -ENOMEM; in cci_pmu_probe()
1643 * CCI PMU has one overflow interrupt per counter; but some may be tied in cci_pmu_probe()
1646 cci_pmu->nr_irqs = 0; in cci_pmu_probe()
1647 for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { in cci_pmu_probe()
1652 if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) in cci_pmu_probe()
1655 cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; in cci_pmu_probe()
1662 if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { in cci_pmu_probe()
1663 dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n", in cci_pmu_probe()
1664 i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); in cci_pmu_probe()
1665 return -EINVAL; in cci_pmu_probe()
1668 raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); in cci_pmu_probe()
1669 mutex_init(&cci_pmu->reserve_mutex); in cci_pmu_probe()
1670 atomic_set(&cci_pmu->active_events, 0); in cci_pmu_probe()
1672 cci_pmu->cpu = raw_smp_processor_id(); in cci_pmu_probe()
1675 "perf/arm/cci:online", NULL, in cci_pmu_probe()
1682 pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name); in cci_pmu_probe()
1697 perf_pmu_unregister(&g_cci_pmu->pmu); in cci_pmu_remove()
1713 MODULE_DESCRIPTION("ARM CCI PMU support");