Lines Matching +full:cpu +full:- +full:cfg

1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/io-64-nonatomic-lo-hi.h>
66 /* CXL rev 3.0 Table 13-5 Events under CXL Vendor ID */
120 * - Fixed function counters refer to an Event Capabilities register.
127 void __iomem *base = info->base; in cxl_pmu_parse_caps()
137 return -ENODEV; in cxl_pmu_parse_caps()
140 info->num_counters = FIELD_GET(CXL_PMU_CAP_NUM_COUNTERS_MSK, val) + 1; in cxl_pmu_parse_caps()
141 info->counter_width = FIELD_GET(CXL_PMU_CAP_COUNTER_WIDTH_MSK, val); in cxl_pmu_parse_caps()
142 info->num_event_capabilities = FIELD_GET(CXL_PMU_CAP_NUM_EVN_CAP_REG_SUP_MSK, val) + 1; in cxl_pmu_parse_caps()
144 info->filter_hdm = FIELD_GET(CXL_PMU_CAP_FILTERS_SUP_MSK, val) & CXL_PMU_FILTER_HDM; in cxl_pmu_parse_caps()
146 info->irq = FIELD_GET(CXL_PMU_CAP_MSI_N_MSK, val); in cxl_pmu_parse_caps()
148 info->irq = -1; in cxl_pmu_parse_caps()
151 for (i = 0; i < info->num_counters; i++) { in cxl_pmu_parse_caps()
160 set_bit(i, info->conf_counter_bm); in cxl_pmu_parse_caps()
173 return -ENOMEM; in cxl_pmu_parse_caps()
175 pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval); in cxl_pmu_parse_caps()
176 pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval); in cxl_pmu_parse_caps()
177 /* For a fixed-purpose counter, use the events mask from the counter CFG */ in cxl_pmu_parse_caps()
178 pmu_ev->msk = events_msk; in cxl_pmu_parse_caps()
179 pmu_ev->counter_idx = i; in cxl_pmu_parse_caps()
181 list_add(&pmu_ev->node, &info->event_caps_fixed); in cxl_pmu_parse_caps()
189 if (!bitmap_empty(info->conf_counter_bm, CXL_PMU_MAX_COUNTERS)) { in cxl_pmu_parse_caps()
194 info->num_event_capabilities) { in cxl_pmu_parse_caps()
197 return -ENOMEM; in cxl_pmu_parse_caps()
200 pmu_ev->vid = FIELD_GET(CXL_PMU_EVENT_CAP_VENDOR_ID_MSK, eval); in cxl_pmu_parse_caps()
201 pmu_ev->gid = FIELD_GET(CXL_PMU_EVENT_CAP_GROUP_ID_MSK, eval); in cxl_pmu_parse_caps()
202 pmu_ev->msk = FIELD_GET(CXL_PMU_EVENT_CAP_SUPPORTED_EVENTS_MSK, eval); in cxl_pmu_parse_caps()
203 pmu_ev->event_idx = j; in cxl_pmu_parse_caps()
204 list_add(&pmu_ev->node, &info->event_caps_configurable); in cxl_pmu_parse_caps()
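The capability parsing above is built entirely on the bitfield helpers from <linux/bitfield.h>: FIELD_GET() pulls a field out of a register value using a GENMASK constant, and FIELD_PREP() composes one for write-back. A minimal standalone sketch of that round trip, using a hypothetical field mask rather than one of the driver's:

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

#define EXAMPLE_FIELD_MSK	GENMASK_ULL(7, 0)	/* hypothetical 8-bit field */

static u64 example_field_roundtrip(u64 reg)
{
	/* Extract the field from the raw 64-bit register value */
	unsigned int n = FIELD_GET(EXAMPLE_FIELD_MSK, reg);

	/* Re-encode it, incremented by one, mirroring how the 0-based counter counts above are decoded */
	return FIELD_PREP(EXAMPLE_FIELD_MSK, n + 1);
}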
231 [cxl_pmu_mask_attr] = CXL_PMU_FORMAT_ATTR(mask, "config:0-31"),
232 [cxl_pmu_gid_attr] = CXL_PMU_FORMAT_ATTR(gid, "config:32-47"),
233 [cxl_pmu_vid_attr] = CXL_PMU_FORMAT_ATTR(vid, "config:48-63"),
234 [cxl_pmu_threshold_attr] = CXL_PMU_FORMAT_ATTR(threshold, "config1:0-15"),
238 [cxl_pmu_hdm_attr] = CXL_PMU_FORMAT_ATTR(hdm, "config2:0-15"),
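The format attributes above define the sysfs encoding the perf core exposes, i.e. how an event is packed into perf_event_attr::config, config1 and config2. A hedged user-space sketch of opening such an event directly via perf_event_open(); the PMU type must be read from sysfs at runtime, and the vid/gid/mask/threshold values below are illustrative only:

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_cxl_pmu_event(int pmu_type)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = pmu_type;	/* dynamic type from /sys/bus/event_source/devices/<pmu>/type */

	/* config: mask in bits 0-31, gid in bits 32-47, vid in bits 48-63 */
	attr.config = (0x1e98ULL << 48) |	/* CXL consortium vendor ID (assumed) */
		      (0x01ULL << 32) |		/* illustrative group ID */
		      0x0001ULL;		/* illustrative event mask */
	attr.config1 = 0x1;			/* illustrative threshold (config1 bits 0-15) */

	/* Counting event bound to one CPU, not a task (pid == -1) */
	return syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
}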
261 if (!info->filter_hdm && in cxl_pmu_format_is_visible()
266 return attr->mode; in cxl_pmu_format_is_visible()
277 return FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, event->attr.config); in cxl_pmu_config_get_mask()
282 return FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, event->attr.config); in cxl_pmu_config_get_gid()
287 return FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, event->attr.config); in cxl_pmu_config_get_vid()
292 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_THRESHOLD_MSK, event->attr.config1); in cxl_pmu_config1_get_threshold()
297 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_INVERT_MSK, event->attr.config1); in cxl_pmu_config1_get_invert()
302 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_EDGE_MSK, event->attr.config1); in cxl_pmu_config1_get_edge()
314 return FIELD_GET(CXL_PMU_ATTR_CONFIG1_FILTER_EN_MSK, event->attr.config1); in cxl_pmu_config1_hdm_filter_en()
319 return FIELD_GET(CXL_PMU_ATTR_CONFIG2_HDM_MSK, event->attr.config2); in cxl_pmu_config2_get_hdm_decoder()
328 return sysfs_emit(buf, "config=%#llx\n", pmu_attr->id); in cxl_pmu_event_sysfs_show()
341 /* CXL rev 3.0 Table 3-17 - Device to Host Requests */
357 /* CXL rev 3.0 Table 3-20 - D2H Response Encodings */
365 /* CXL rev 3.0 Table 3-21 - CXL.cache - Mapping of H2D Requests to D2H Responses */
369 /* CXL rev 3.0 Table 3-22 - H2D Response Opcode Encodings */
377 /* CXL rev 3.0 Table 13-5 directly lists these */
380 /* CXL rev 3.0 Table 3-29 M2S Req Memory Opcodes */
389 /* CXL rev 3.0 Table 3-35 M2S RwD Memory Opcodes */
393 /* CXL rev 3.0 Table 3-38 M2S BIRsp Memory Opcodes */
400 /* CXL rev 3.0 Table 3-40 S2M BISnp Opcodes */
407 /* CXL rev 3.0 Table 3-43 S2M NDR Opcodes */
412 /* CXL rev 3.0 Table 3-46 S2M DRS opcodes */
415 /* CXL rev 3.0 Table 13-5 directly lists these */
431 list_for_each_entry(pmu_ev, &info->event_caps_fixed, node) { in cxl_pmu_find_fixed_counter_ev_cap()
432 if (vid != pmu_ev->vid || gid != pmu_ev->gid) in cxl_pmu_find_fixed_counter_ev_cap()
436 if (msk == pmu_ev->msk) in cxl_pmu_find_fixed_counter_ev_cap()
440 return ERR_PTR(-EINVAL); in cxl_pmu_find_fixed_counter_ev_cap()
448 list_for_each_entry(pmu_ev, &info->event_caps_configurable, node) { in cxl_pmu_find_config_counter_ev_cap()
449 if (vid != pmu_ev->vid || gid != pmu_ev->gid) in cxl_pmu_find_config_counter_ev_cap()
453 if (msk & ~pmu_ev->msk) in cxl_pmu_find_config_counter_ev_cap()
459 return ERR_PTR(-EINVAL); in cxl_pmu_find_config_counter_ev_cap()
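The two lookup helpers above differ only in how the event mask is compared: a fixed-function counter must match its advertised mask exactly (msk == pmu_ev->msk), while a configurable counter only requires the requested sub-events to be a subset of what the Event Capabilities register supports (msk & ~pmu_ev->msk must be zero). A small hedged illustration of that subset test:

#include <linux/types.h>

/* Sketch only: true when every requested event bit is also advertised */
static bool example_events_subset(u64 requested, u64 supported)
{
	/* Any requested bit outside the supported mask rejects the event */
	return !(requested & ~supported);
}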
469 int vid = FIELD_GET(CXL_PMU_ATTR_CONFIG_VID_MSK, pmu_attr->id); in cxl_pmu_event_is_visible()
470 int gid = FIELD_GET(CXL_PMU_ATTR_CONFIG_GID_MSK, pmu_attr->id); in cxl_pmu_event_is_visible()
471 int msk = FIELD_GET(CXL_PMU_ATTR_CONFIG_MASK_MSK, pmu_attr->id); in cxl_pmu_event_is_visible()
474 return attr->mode; in cxl_pmu_event_is_visible()
477 return attr->mode; in cxl_pmu_event_is_visible()
493 return cpumap_print_to_pagebuf(true, buf, cpumask_of(info->on_cpu)); in cpumask_show()
517 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_get_event_idx()
532 if (!test_bit(pmu_ev->counter_idx, info->used_counter_bm)) { in cxl_pmu_get_event_idx()
533 *counter_idx = pmu_ev->counter_idx; in cxl_pmu_get_event_idx()
544 bitmap_andnot(configurable_and_free, info->conf_counter_bm, in cxl_pmu_get_event_idx()
545 info->used_counter_bm, CXL_PMU_MAX_COUNTERS); in cxl_pmu_get_event_idx()
549 return -EINVAL; in cxl_pmu_get_event_idx()
555 return -EINVAL; in cxl_pmu_get_event_idx()
560 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_init()
563 /* Top level type sanity check - is this a Hardware Event being requested */ in cxl_pmu_event_init()
564 if (event->attr.type != event->pmu->type) in cxl_pmu_event_init()
565 return -ENOENT; in cxl_pmu_event_init()
567 if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) in cxl_pmu_event_init()
568 return -EOPNOTSUPP; in cxl_pmu_event_init()
580 event->cpu = info->on_cpu; in cxl_pmu_event_init()
588 void __iomem *base = info->base; in cxl_pmu_enable()
597 void __iomem *base = info->base; in cxl_pmu_disable()
611 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_start()
612 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_start()
613 void __iomem *base = info->base; in cxl_pmu_event_start()
614 u64 cfg; in cxl_pmu_event_start() local
620 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) in cxl_pmu_event_start()
623 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); in cxl_pmu_event_start()
624 hwc->state = 0; in cxl_pmu_event_start()
630 if (info->filter_hdm) { in cxl_pmu_event_start()
632 cfg = cxl_pmu_config2_get_hdm_decoder(event); in cxl_pmu_event_start()
634 cfg = GENMASK(31, 0); /* No filtering if 0xFFFF_FFFF */ in cxl_pmu_event_start()
635 writeq(cfg, base + CXL_PMU_FILTER_CFG_REG(hwc->idx, 0)); in cxl_pmu_event_start()
638 cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_start()
639 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1); in cxl_pmu_event_start()
640 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_FREEZE_ON_OVRFLW, 1); in cxl_pmu_event_start()
641 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_ENABLE, 1); in cxl_pmu_event_start()
642 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EDGE, in cxl_pmu_event_start()
644 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_INVERT, in cxl_pmu_event_start()
648 if (test_bit(hwc->idx, info->conf_counter_bm)) { in cxl_pmu_event_start()
649 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENT_GRP_ID_IDX_MSK, in cxl_pmu_event_start()
650 hwc->event_base); in cxl_pmu_event_start()
651 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_EVENTS_MSK, in cxl_pmu_event_start()
654 cfg &= ~CXL_PMU_COUNTER_CFG_THRESHOLD_MSK; in cxl_pmu_event_start()
661 * (CXL 3.0 8.2.7.2.1 Counter Configuration - threshold field definition) in cxl_pmu_event_start()
663 cfg |= FIELD_PREP(CXL_PMU_COUNTER_CFG_THRESHOLD_MSK, in cxl_pmu_event_start()
665 writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_start()
667 local64_set(&hwc->prev_count, 0); in cxl_pmu_event_start()
668 writeq(0, base + CXL_PMU_COUNTER_REG(hwc->idx)); in cxl_pmu_event_start()
675 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_read_counter()
676 void __iomem *base = info->base; in cxl_pmu_read_counter()
678 return readq(base + CXL_PMU_COUNTER_REG(event->hw.idx)); in cxl_pmu_read_counter()
683 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in __cxl_pmu_read()
684 struct hw_perf_event *hwc = &event->hw; in __cxl_pmu_read()
688 prev_cnt = local64_read(&hwc->prev_count); in __cxl_pmu_read()
690 } while (local64_cmpxchg(&hwc->prev_count, prev_cnt, new_cnt) != prev_cnt); in __cxl_pmu_read()
696 delta = (new_cnt - prev_cnt) & GENMASK_ULL(info->counter_width - 1, 0); in __cxl_pmu_read()
697 if (overflow && delta < GENMASK_ULL(info->counter_width - 1, 0)) in __cxl_pmu_read()
698 delta += (1UL << info->counter_width); in __cxl_pmu_read()
700 local64_add(delta, &event->count); in __cxl_pmu_read()
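The read path above computes deltas modulo the hardware counter width, then adds one full counter period when an overflow was signalled but the raw difference alone would under-count. A standalone sketch of the same arithmetic, with the counter width passed in explicitly (assumed to be less than 64 bits):

#include <linux/bits.h>
#include <linux/types.h>

static u64 example_counter_delta(u64 prev_cnt, u64 new_cnt,
				 unsigned int width, bool overflow)
{
	u64 mask = GENMASK_ULL(width - 1, 0);
	/* Subtraction modulo 2^width handles a single natural wrap */
	u64 delta = (new_cnt - prev_cnt) & mask;

	/* If an overflow was signalled but the masked delta alone does not reflect it, add one full period */
	if (overflow && delta < mask)
		delta += 1ULL << width;

	return delta;
}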
710 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_stop()
711 void __iomem *base = info->base; in cxl_pmu_event_stop()
712 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_stop()
713 u64 cfg; in cxl_pmu_event_stop() local
716 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in cxl_pmu_event_stop()
717 hwc->state |= PERF_HES_STOPPED; in cxl_pmu_event_stop()
719 cfg = readq(base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_stop()
720 cfg &= ~(FIELD_PREP(CXL_PMU_COUNTER_CFG_INT_ON_OVRFLW, 1) | in cxl_pmu_event_stop()
722 writeq(cfg, base + CXL_PMU_COUNTER_CFG_REG(hwc->idx)); in cxl_pmu_event_stop()
724 hwc->state |= PERF_HES_UPTODATE; in cxl_pmu_event_stop()
729 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_add()
730 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_add()
734 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in cxl_pmu_event_add()
740 hwc->idx = idx; in cxl_pmu_event_add()
743 hwc->event_base = event_idx; in cxl_pmu_event_add()
744 info->hw_events[idx] = event; in cxl_pmu_event_add()
745 set_bit(idx, info->used_counter_bm); in cxl_pmu_event_add()
755 struct cxl_pmu_info *info = pmu_to_cxl_pmu_info(event->pmu); in cxl_pmu_event_del()
756 struct hw_perf_event *hwc = &event->hw; in cxl_pmu_event_del()
759 clear_bit(hwc->idx, info->used_counter_bm); in cxl_pmu_event_del()
760 info->hw_events[hwc->idx] = NULL; in cxl_pmu_event_del()
767 void __iomem *base = info->base; in cxl_pmu_irq()
779 for_each_set_bit(i, overflowedbm, info->num_counters) { in cxl_pmu_irq()
780 struct perf_event *event = info->hw_events[i]; in cxl_pmu_irq()
783 dev_dbg(info->pmu.dev, in cxl_pmu_irq()
800 perf_pmu_unregister(&info->pmu); in cxl_pmu_perf_unregister()
807 cpuhp_state_remove_instance_nocalls(cxl_pmu_cpuhp_state_num, &info->node); in cxl_pmu_cpuhp_remove()
813 struct pci_dev *pdev = to_pci_dev(dev->parent); in cxl_pmu_probe()
821 return -ENOMEM; in cxl_pmu_probe()
824 INIT_LIST_HEAD(&info->event_caps_fixed); in cxl_pmu_probe()
825 INIT_LIST_HEAD(&info->event_caps_configurable); in cxl_pmu_probe()
827 info->base = pmu->base; in cxl_pmu_probe()
829 info->on_cpu = -1; in cxl_pmu_probe()
834 info->hw_events = devm_kcalloc(dev, sizeof(*info->hw_events), in cxl_pmu_probe()
835 info->num_counters, GFP_KERNEL); in cxl_pmu_probe()
836 if (!info->hw_events) in cxl_pmu_probe()
837 return -ENOMEM; in cxl_pmu_probe()
839 switch (pmu->type) { in cxl_pmu_probe()
842 pmu->assoc_id, pmu->index); in cxl_pmu_probe()
846 return -ENOMEM; in cxl_pmu_probe()
848 info->pmu = (struct pmu) { in cxl_pmu_probe()
865 if (info->irq <= 0) in cxl_pmu_probe()
866 return -EINVAL; in cxl_pmu_probe()
868 rc = pci_irq_vector(pdev, info->irq); in cxl_pmu_probe()
875 return -ENOMEM; in cxl_pmu_probe()
881 info->irq = irq; in cxl_pmu_probe()
883 rc = cpuhp_state_add_instance(cxl_pmu_cpuhp_state_num, &info->node); in cxl_pmu_probe()
891 rc = perf_pmu_register(&info->pmu, info->pmu.name, -1); in cxl_pmu_probe()
908 static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node) in cxl_pmu_online_cpu() argument
912 if (info->on_cpu != -1) in cxl_pmu_online_cpu()
915 info->on_cpu = cpu; in cxl_pmu_online_cpu()
917 * CPU HP lock is held so we should be guaranteed that the CPU hasn't yet in cxl_pmu_online_cpu()
920 WARN_ON(irq_set_affinity(info->irq, cpumask_of(cpu))); in cxl_pmu_online_cpu()
925 static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node) in cxl_pmu_offline_cpu() argument
930 if (info->on_cpu != cpu) in cxl_pmu_offline_cpu()
933 info->on_cpu = -1; in cxl_pmu_offline_cpu()
934 target = cpumask_any_but(cpu_online_mask, cpu); in cxl_pmu_offline_cpu()
936 dev_err(info->pmu.dev, "Unable to find a suitable CPU\n"); in cxl_pmu_offline_cpu()
940 perf_pmu_migrate_context(&info->pmu, cpu, target); in cxl_pmu_offline_cpu()
941 info->on_cpu = target; in cxl_pmu_offline_cpu()
943 * CPU HP lock is held so we should be guaranteed that this CPU hasn't yet in cxl_pmu_offline_cpu()
946 WARN_ON(irq_set_affinity(info->irq, cpumask_of(target))); in cxl_pmu_offline_cpu()
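The two callbacks above are the per-instance CPU hotplug handlers; the listing does not show where they are registered, but the cxl_pmu_cpuhp_state_num uses seen earlier suggest a dynamic multi-instance hotplug state set up at module init, roughly as sketched below (the state name string is illustrative):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/types.h>

static int cxl_pmu_online_cpu(unsigned int cpu, struct hlist_node *node);
static int cxl_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node);

static enum cpuhp_state cxl_pmu_cpuhp_state_num;

static int __init example_cxl_pmu_init(void)
{
	int rc;

	/* Dynamic state: each PMU instance later adds itself with cpuhp_state_add_instance() */
	rc = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				     "AP_PERF_CXL_PMU_ONLINE",	/* illustrative name */
				     cxl_pmu_online_cpu,
				     cxl_pmu_offline_cpu);
	if (rc < 0)
		return rc;

	cxl_pmu_cpuhp_state_num = rc;
	return 0;
}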