Lines matching "odd-numbered" in arch/mips/kernel/perf_event_mipsxx.c
1 // SPDX-License-Identifier: GPL-2.0-only
7 * Author: Deng-Cheng Zhu
29 #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
122 #define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
159 /* 0: Not Loongson-3
160 * 1: Loongson-3A1000/3B1000/3B1500
161 * 2: Loongson-3A2000/3A3000
162 * 3: Loongson-3A4000+
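The comment above enumerates the Loongson-3 PMU generations the driver distinguishes. As a reading aid, here is an illustrative enum restating that mapping; the kernel itself encodes these values as LOONGSON_PMU_TYPEn macros and branches on them (for example, the event_base shift at lines 325/327 below differs on one Loongson-3 variant):

/* Illustrative sketch only: the kernel uses LOONGSON_PMU_TYPEn
 * macros for the same four values. */
enum loongson_pmu_type {
	LOONGSON_PMU_TYPE0,	/* not a Loongson-3 */
	LOONGSON_PMU_TYPE1,	/* 3A1000/3B1000/3B1500 */
	LOONGSON_PMU_TYPE2,	/* 3A2000/3A3000 */
	LOONGSON_PMU_TYPE3,	/* 3A4000 and later */
};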
325 cntr_mask = (hwc->event_base >> 10) & 0xffff; in mipsxx_pmu_alloc_counter()
327 cntr_mask = (hwc->event_base >> 8) & 0xffff; in mipsxx_pmu_alloc_counter()
329 for (i = mipspmu.num_counters - 1; i >= 0; i--) { in mipsxx_pmu_alloc_counter()
332 * even and odd counters, whereas many others are counted only by in mipsxx_pmu_alloc_counter()
333 * even _or_ odd counters. This introduces an issue that in mipsxx_pmu_alloc_counter()
341 !test_and_set_bit(i, cpuc->used_mask)) in mipsxx_pmu_alloc_counter()
345 return -EAGAIN; in mipsxx_pmu_alloc_counter()
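The loop at lines 329-341 allocates counters from the highest-numbered one downwards, so events that can live on either bank are steered away from the low counters that even-only or odd-only events may need. A minimal single-threaded model of that policy (names are illustrative; the kernel claims the slot atomically with test_and_set_bit() on cpuc->used_mask and returns -EAGAIN when nothing fits):

#include <stdint.h>

/* Try to claim a counter permitted by cntr_mask, scanning from the
 * highest-numbered counter down.  Returns the counter index, or -1
 * when every permitted counter is already in use. */
static int alloc_counter(uint16_t cntr_mask, uint16_t *used_mask,
			 int num_counters)
{
	for (int i = num_counters - 1; i >= 0; i--) {
		if ((cntr_mask & (1u << i)) && !(*used_mask & (1u << i))) {
			*used_mask |= 1u << i;	/* claim it */
			return i;
		}
	}
	return -1;
}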
352 unsigned int range = evt->event_base >> 24; in mipsxx_pmu_enable_event()
357 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) | in mipsxx_pmu_enable_event()
358 (evt->config_base & M_PERFCTL_CONFIG_MASK) | in mipsxx_pmu_enable_event()
362 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | in mipsxx_pmu_enable_event()
363 (evt->config_base & M_PERFCTL_CONFIG_MASK) | in mipsxx_pmu_enable_event()
369 cpuc->saved_ctrl[idx] |= in mipsxx_pmu_enable_event()
374 cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; in mipsxx_pmu_enable_event()
379 * Set up the counter for a particular CPU when event->cpu is in mipsxx_pmu_enable_event()
383 cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id(); in mipsxx_pmu_enable_event()
387 cpuc->saved_ctrl[idx] |= ctrl; in mipsxx_pmu_enable_event()
403 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & in mipsxx_pmu_disable_event()
405 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); in mipsxx_pmu_disable_event()
413 u64 left = local64_read(&hwc->period_left); in mipspmu_event_set_period()
414 u64 period = hwc->sample_period; in mipspmu_event_set_period()
420 local64_set(&hwc->period_left, left); in mipspmu_event_set_period()
421 hwc->last_period = period; in mipspmu_event_set_period()
426 local64_set(&hwc->period_left, left); in mipspmu_event_set_period()
427 hwc->last_period = period; in mipspmu_event_set_period()
433 local64_set(&hwc->period_left, left); in mipspmu_event_set_period()
436 local64_set(&hwc->prev_count, mipspmu.overflow - left); in mipspmu_event_set_period()
440 M_PERFCTL_EVENT(hwc->event_base & 0x3ff)); in mipspmu_event_set_period()
442 mipspmu.write_counter(idx, mipspmu.overflow - left); in mipspmu_event_set_period()
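Lines 413-442 arm the next sampling period. Because these counters count upwards and signal on overflow, the hardware is preloaded with overflow - left so that exactly `left` further events raise the interrupt; prev_count is set to the same value (line 436) so the next update sees a clean delta. A sketch of the preload arithmetic, with overflow and max_period standing in for the mipspmu fields of the same name:

#include <stdint.h>

/* Compute the preload for an up-counter that interrupts at `overflow`. */
static uint64_t preload_value(uint64_t overflow, uint64_t left,
			      uint64_t max_period)
{
	if (left > max_period)		/* counter can't hold more */
		left = max_period;
	return overflow - left;		/* overflows after `left` events */
}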
457 prev_raw_count = local64_read(&hwc->prev_count); in mipspmu_event_update()
460 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, in mipspmu_event_update()
464 delta = new_raw_count - prev_raw_count; in mipspmu_event_update()
466 local64_add(delta, &event->count); in mipspmu_event_update()
467 local64_sub(delta, &hwc->period_left); in mipspmu_event_update()
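The update path at lines 457-467 is lock-free: it re-reads the hardware counter until it can cmpxchg hwc->prev_count forward, then folds the unsigned difference into the event total. A self-contained C11 sketch of the same pattern (read_counter() is a hypothetical stand-in for mipspmu.read_counter(idx)):

#include <stdint.h>
#include <stdatomic.h>

extern uint64_t read_counter(void);	/* hypothetical hardware read */

static void event_update(_Atomic uint64_t *prev_count, uint64_t *count)
{
	uint64_t prev, new;

	do {
		prev = atomic_load(prev_count);
		new = read_counter();
	} while (!atomic_compare_exchange_weak(prev_count, &prev, new));

	*count += new - prev;	/* unsigned subtraction absorbs wrap */
}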
472 struct hw_perf_event *hwc = &event->hw; in mipspmu_start()
475 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); in mipspmu_start()
477 hwc->state = 0; in mipspmu_start()
480 mipspmu_event_set_period(event, hwc, hwc->idx); in mipspmu_start()
483 mipsxx_pmu_enable_event(hwc, hwc->idx); in mipspmu_start()
488 struct hw_perf_event *hwc = &event->hw; in mipspmu_stop()
490 if (!(hwc->state & PERF_HES_STOPPED)) { in mipspmu_stop()
492 mipsxx_pmu_disable_event(hwc->idx); in mipspmu_stop()
494 mipspmu_event_update(event, hwc, hwc->idx); in mipspmu_stop()
495 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; in mipspmu_stop()
502 struct hw_perf_event *hwc = &event->hw; in mipspmu_add()
506 perf_pmu_disable(event->pmu); in mipspmu_add()
519 event->hw.idx = idx; in mipspmu_add()
521 cpuc->events[idx] = event; in mipspmu_add()
523 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; in mipspmu_add()
531 perf_pmu_enable(event->pmu); in mipspmu_add()
538 struct hw_perf_event *hwc = &event->hw; in mipspmu_del()
539 int idx = hwc->idx; in mipspmu_del()
544 cpuc->events[idx] = NULL; in mipspmu_del()
545 clear_bit(idx, cpuc->used_mask); in mipspmu_del()
552 struct hw_perf_event *hwc = &event->hw; in mipspmu_read()
555 if (hwc->idx < 0) in mipspmu_read()
558 mipspmu_event_update(event, hwc, hwc->idx); in mipspmu_read()
570 * MIPS performance counters can be per-TC. The control registers can
616 err = -ENOENT; in mipspmu_get_irq()
632 * specific low-level init routines.
658 return -EOPNOTSUPP; in mipspmu_event_init()
660 switch (event->attr.type) { in mipspmu_event_init()
667 return -ENOENT; in mipspmu_event_init()
670 if (event->cpu >= 0 && !cpu_online(event->cpu)) in mipspmu_event_init()
671 return -ENODEV; in mipspmu_event_init()
708 return ((unsigned int)pev->range << 24) | in mipspmu_perf_event_encode()
709 (pev->cntr_mask & 0xffff00) | in mipspmu_perf_event_encode()
710 (pev->event_id & 0xff); in mipspmu_perf_event_encode()
715 return (pev->cntr_mask & 0xfffc00) | in mipspmu_perf_event_encode()
716 (pev->event_id & 0x3ff); in mipspmu_perf_event_encode()
718 return (pev->cntr_mask & 0xffff00) | in mipspmu_perf_event_encode()
719 (pev->event_id & 0xff); in mipspmu_perf_event_encode()
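These encoders pack an event descriptor into hwc->event_base so that later stages can unpack it with plain shifts: the counter masks are kept pre-shifted (bits 8 and up, or 10 and up for the wider Loongson-3 event field), which is why lines 325/327 recover them with >> 10 and >> 8. A sketch of the generic variant, using a hypothetical struct in place of struct mips_perf_event:

#include <stdint.h>

struct pev {			/* stand-in for struct mips_perf_event */
	unsigned int cntr_mask;	/* stored pre-shifted, bits 8..23 */
	unsigned int event_id;
};

static uint32_t encode(const struct pev *p)
{
	return (p->cntr_mask & 0xffff00) | (p->event_id & 0xff);
}

static uint16_t decode_cntr_mask(uint32_t event_base)
{
	return (event_base >> 8) & 0xffff;	/* matches line 327 */
}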
727 return ERR_PTR(-EOPNOTSUPP); in mipspmu_map_general_event()
738 return ERR_PTR(-EINVAL); in mipspmu_map_cache_event()
742 return ERR_PTR(-EINVAL); in mipspmu_map_cache_event()
746 return ERR_PTR(-EINVAL); in mipspmu_map_cache_event()
753 if (pev->cntr_mask == 0) in mipspmu_map_cache_event()
754 return ERR_PTR(-EOPNOTSUPP); in mipspmu_map_cache_event()
762 struct perf_event *sibling, *leader = event->group_leader; in validate_group()
767 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) in validate_group()
768 return -EINVAL; in validate_group()
771 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) in validate_group()
772 return -EINVAL; in validate_group()
775 if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) in validate_group()
776 return -EINVAL; in validate_group()
786 struct perf_event *event = cpuc->events[idx]; in handle_associated_event()
787 struct hw_perf_event *hwc = &event->hw; in handle_associated_event()
790 data->period = event->hw.last_period; in handle_associated_event()
1136 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
1473 struct perf_event_attr *attr = &event->attr; in __hw_perf_event_init()
1474 struct hw_perf_event *hwc = &event->hw; in __hw_perf_event_init()
1479 if (PERF_TYPE_HARDWARE == event->attr.type) { in __hw_perf_event_init()
1480 if (event->attr.config >= PERF_COUNT_HW_MAX) in __hw_perf_event_init()
1481 return -EINVAL; in __hw_perf_event_init()
1482 pev = mipspmu_map_general_event(event->attr.config); in __hw_perf_event_init()
1483 } else if (PERF_TYPE_HW_CACHE == event->attr.type) { in __hw_perf_event_init()
1484 pev = mipspmu_map_cache_event(event->attr.config); in __hw_perf_event_init()
1485 } else if (PERF_TYPE_RAW == event->attr.type) { in __hw_perf_event_init()
1488 pev = mipspmu.map_raw_event(event->attr.config); in __hw_perf_event_init()
1491 return -EOPNOTSUPP; in __hw_perf_event_init()
1495 if (PERF_TYPE_RAW == event->attr.type) in __hw_perf_event_init()
1504 hwc->config_base = MIPS_PERFCTRL_IE; in __hw_perf_event_init()
1506 hwc->event_base = mipspmu_perf_event_encode(pev); in __hw_perf_event_init()
1507 if (PERF_TYPE_RAW == event->attr.type) in __hw_perf_event_init()
1510 if (!attr->exclude_user) in __hw_perf_event_init()
1511 hwc->config_base |= MIPS_PERFCTRL_U; in __hw_perf_event_init()
1512 if (!attr->exclude_kernel) { in __hw_perf_event_init()
1513 hwc->config_base |= MIPS_PERFCTRL_K; in __hw_perf_event_init()
1515 hwc->config_base |= MIPS_PERFCTRL_EXL; in __hw_perf_event_init()
1517 if (!attr->exclude_hv) in __hw_perf_event_init()
1518 hwc->config_base |= MIPS_PERFCTRL_S; in __hw_perf_event_init()
1520 hwc->config_base &= M_PERFCTL_CONFIG_MASK; in __hw_perf_event_init()
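Lines 1510-1520 translate perf's generic exclude_* attributes into PerfCtl privilege-enable bits: each exclusion flag left clear sets the bit that lets the counter run at that level, with exclude_kernel also covering exception level (EXL) and exclude_hv mapped onto supervisor mode. A compact restatement; the bit values mirror how the MIPS_PERFCTRL_* bits are laid out in asm/mipsregs.h, but treat them as illustrative:

#include <stdbool.h>
#include <stdint.h>

/* Illustrative stand-ins for MIPS_PERFCTRL_* from asm/mipsregs.h. */
#define PERFCTRL_EXL	(1u << 0)	/* count at exception level */
#define PERFCTRL_K	(1u << 1)	/* count in kernel mode */
#define PERFCTRL_S	(1u << 2)	/* count in supervisor mode */
#define PERFCTRL_U	(1u << 3)	/* count in user mode */

static uint32_t config_base_for(bool exclude_user, bool exclude_kernel,
				bool exclude_hv)
{
	uint32_t cfg = 0;

	if (!exclude_user)
		cfg |= PERFCTRL_U;
	if (!exclude_kernel)
		cfg |= PERFCTRL_K | PERFCTRL_EXL;
	if (!exclude_hv)
		cfg |= PERFCTRL_S;
	return cfg;
}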
1525 hwc->idx = -1; in __hw_perf_event_init()
1526 hwc->config = 0; in __hw_perf_event_init()
1528 if (!hwc->sample_period) { in __hw_perf_event_init()
1529 hwc->sample_period = mipspmu.max_period; in __hw_perf_event_init()
1530 hwc->last_period = hwc->sample_period; in __hw_perf_event_init()
1531 local64_set(&hwc->period_left, hwc->sample_period); in __hw_perf_event_init()
1535 if (event->group_leader != event) in __hw_perf_event_init()
1538 event->destroy = hw_perf_event_destroy; in __hw_perf_event_init()
1541 event->destroy(event); in __hw_perf_event_init()
1554 ctr--; in pause_local_counters()
1555 cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr); in pause_local_counters()
1556 mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & in pause_local_counters()
1568 ctr--; in resume_local_counters()
1569 mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); in resume_local_counters()
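pause_local_counters() and resume_local_counters() bracket the shared interrupt handler: pausing saves every control register and clears the bits that allow counting, resuming writes the saved words back verbatim. A freestanding model of the pairing, with an assumed register layout:

#include <stdint.h>

#define NUM_CTRS	4
#define CTL_MODE_BITS	0x1fu	/* EXL/K/S/U/IE, illustrative values */

static uint32_t hw_ctl[NUM_CTRS];	/* models the control registers */
static uint32_t saved_ctl[NUM_CTRS];

static void pause_counters(void)
{
	int ctr = NUM_CTRS;

	do {
		ctr--;
		saved_ctl[ctr] = hw_ctl[ctr];			/* save... */
		hw_ctl[ctr] = saved_ctl[ctr] & ~CTL_MODE_BITS;	/* ...and stop */
	} while (ctr > 0);
}

static void resume_counters(void)
{
	int ctr = NUM_CTRS;

	do {
		ctr--;
		hw_ctl[ctr] = saved_ctl[ctr];	/* restore verbatim */
	} while (ctr > 0);
}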
1600 for (n = counters - 1; n >= 0; n--) { in mipsxx_pmu_handle_shared_irq()
1601 if (!test_bit(n, cpuc->used_mask)) in mipsxx_pmu_handle_shared_irq()
1693 * For most cores the user can use 0-255 raw events, where 0-127 are for the events
1694 * of even counters, and 128-255 are for odd counters. Note that bit 7 is used to
1695 * indicate the even/odd bank selector. So, for example, when the user wants to take
1696 * the Event Num of 15 for odd counters (by referring to the user manual), then
1701 * events 0-511, where 0-255 are for the events of even counters, and 256-511
1702 * are for odd counters, so bit 8 is used to indicate the even/odd bank selector.
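The elided lines 1697-1700 evidently finish the worked example: setting bank-selector bit 7 on event 15 gives 0x80 | 15 = 0x8F as the raw config value. From userspace that number goes straight into a PERF_TYPE_RAW config; a minimal example (error handling trimmed, and the event number is only meaningful on a core whose manual defines it):

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x8f;	/* event 15, odd-counter bank (bit 7 set) */

	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	/* ... workload under measurement ... */

	read(fd, &count, sizeof(count));
	printf("raw event 0x8f: %lld\n", count);
	return 0;
}

The perf tool reaches the same path with a raw event specifier, e.g. perf stat -e r8f.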
1706 /* currently most cores have 7-bit event numbers */ in mipsxx_pmu_map_raw_event()
1720 * This is actually doing nothing. Non-multithreading in mipsxx_pmu_map_raw_event()
1764 /* 8-bit event numbers */ in mipsxx_pmu_map_raw_event()
1778 /* 8-bit event numbers */ in mipsxx_pmu_map_raw_event()
1840 return ERR_PTR(-EOPNOTSUPP); in mipsxx_pmu_map_raw_event()
1871 return ERR_PTR(-EOPNOTSUPP); in octeon_pmu_map_raw_event()
1882 return ERR_PTR(-EOPNOTSUPP); in octeon_pmu_map_raw_event()
1900 return -ENODEV; in init_hw_perf_events()
1913 irq = -1; in init_hw_perf_events()
2017 return -ENODEV; in init_hw_perf_events()
2026 mipspmu.max_period = (1ULL << 47) - 1; in init_hw_perf_events()
2027 mipspmu.valid_count = (1ULL << 47) - 1; in init_hw_perf_events()
2031 mipspmu.max_period = (1ULL << 63) - 1; in init_hw_perf_events()
2032 mipspmu.valid_count = (1ULL << 63) - 1; in init_hw_perf_events()
2039 mipspmu.max_period = (1ULL << 31) - 1; in init_hw_perf_events()
2040 mipspmu.valid_count = (1ULL << 31) - 1; in init_hw_perf_events()
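Across the three branches above, max_period is always one bit narrower than the counter (2^47-1 for the 48-bit Loongson-3 counters, 2^63-1 for 64-bit, 2^31-1 for the baseline 32-bit case): the counter's top bit doubles as the overflow/interrupt indicator, so only width-1 bits can hold a programmable period. With the CNTR_BIT_MASK() macro from line 122, the pattern collapses to a one-liner (counter_max_period() is a hypothetical helper, not in the driver):

#include <stdint.h>

#define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

/* With the top bit reserved as the overflow flag, an n-bit counter
 * can be programmed with at most n-1 bits of period. */
static inline uint64_t counter_max_period(int counter_bits)
{
	return CNTR_BIT_MASK(counter_bits - 1);
}
/* counter_max_period(32) == (1ULL << 31) - 1, matching line 2039 */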
2048 pr_cont("%s PMU enabled, %d %d-bit counters available to each " in init_hw_perf_events()