Lines Matching +full:package +full:- +full:mode
1 // SPDX-License-Identifier: GPL-2.0-only
29 #include <asm/intel-family.h>
105 #define NR_RAW_PRIMITIVES (NR_RAPL_PRIMITIVES - 2)
129 return rd->rpl[pl].name ? true : false; in is_pl_valid()
134 if (rd->rp->priv->type == RAPL_IF_TPMI) { in get_pl_lock_prim()
145 return -EINVAL; in get_pl_lock_prim()
151 if (rd->rp->priv->limits[rd->id] & BIT(POWER_LIMIT2)) in get_pl_lock_prim()
162 if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI) in get_pl_prim()
172 return -EINVAL; in get_pl_prim()
176 if (prim == PL_CLAMP && rd->rp->priv->type != RAPL_IF_TPMI) in get_pl_prim()
186 return -EINVAL; in get_pl_prim()
197 return -EINVAL; in get_pl_prim()
199 return -EINVAL; in get_pl_prim()
209 void (*set_floor_freq)(struct rapl_domain *rd, bool mode);
221 return rp->priv->defaults; in get_defaults()
272 "package",
299 return -EIO; in get_energy_counter()
313 struct rapl_package *rp = rd->rp; in release_zone()
315 /* package zone is the last zone of a package; we can free in release_zone()
318 if (rd->id == RAPL_DOMAIN_PACKAGE) { in release_zone()
320 rp->domains = NULL; in release_zone()
339 static int set_domain_enable(struct powercap_zone *power_zone, bool mode) in set_domain_enable() argument
342 struct rapl_defaults *defaults = get_defaults(rd->rp); in set_domain_enable()
346 ret = rapl_write_pl_data(rd, POWER_LIMIT1, PL_ENABLE, mode); in set_domain_enable()
347 if (!ret && defaults->set_floor_freq) in set_domain_enable()
348 defaults->set_floor_freq(rd, mode); in set_domain_enable()
354 static int get_domain_enable(struct powercap_zone *power_zone, bool *mode) in get_domain_enable() argument
360 if (rd->rpl[POWER_LIMIT1].locked) { in get_domain_enable()
361 *mode = false; in get_domain_enable()
367 *mode = val; in get_domain_enable()
419 * index in that some PLs may be missing due to non-existent MSRs. So we
434 return -EINVAL; in contraint_to_pl()
448 rp = rd->rp; in set_power_limit()
525 return rd->rpl[id].name; in get_constraint_name()
566 return rp->lead_cpu >= 0 ? rp->lead_cpu : rp->id; in get_rid()
569 /* called after domain detection and package level data are set */
574 struct rapl_domain *rd = rp->domains; in rapl_init_domains()
577 unsigned int mask = rp->domain_map & (1 << i); in rapl_init_domains()
583 rd->rp = rp; in rapl_init_domains()
585 if (i == RAPL_DOMAIN_PLATFORM && rp->id > 0) { in rapl_init_domains()
586 snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "psys-%d", in rapl_init_domains()
587 rp->lead_cpu >= 0 ? topology_physical_package_id(rp->lead_cpu) : in rapl_init_domains()
588 rp->id); in rapl_init_domains()
590 snprintf(rd->name, RAPL_DOMAIN_NAME_LENGTH, "%s", in rapl_init_domains()
594 rd->id = i; in rapl_init_domains()
597 rp->priv->limits[i] |= BIT(POWER_LIMIT1); in rapl_init_domains()
600 if (rp->priv->limits[i] & BIT(t)) in rapl_init_domains()
601 rd->rpl[t].name = pl_names[t]; in rapl_init_domains()
605 rd->regs[j] = rp->priv->regs[i][j]; in rapl_init_domains()
615 struct rapl_defaults *defaults = get_defaults(rd->rp); in rapl_unit_xlate()
620 units = rd->power_unit; in rapl_unit_xlate()
624 units = rd->energy_unit; in rapl_unit_xlate()
627 return defaults->compute_time_window(rd, value, to_raw); in rapl_unit_xlate()
692 /* non-hardware */
734 /* non-hardware */
741 struct rapl_primitive_info *rpi = rp->priv->rpi; in get_rpi()
751 switch (rp->priv->type) { in rapl_config()
755 rp->priv->defaults = (void *)defaults_msr; in rapl_config()
756 rp->priv->rpi = (void *)rpi_msr; in rapl_config()
759 rp->priv->defaults = (void *)&defaults_tpmi; in rapl_config()
760 rp->priv->rpi = (void *)rpi_tpmi; in rapl_config()
763 return -EINVAL; in rapl_config()
767 if (!rp->priv->defaults || !rp->priv->rpi) in rapl_config()
768 return -ENODEV; in rapl_config()
776 struct rapl_defaults *defaults = get_defaults(rd->rp); in prim_fixups()
778 if (!defaults->spr_psys_bits) in prim_fixups()
781 if (rd->id != RAPL_DOMAIN_PLATFORM) in prim_fixups()
805 * RAPL MSRs are non-architectural and are not laid out consistently across
809 * is pre-assigned based on RAPL unit MSRs read at init time.
810 * 63-------------------------- 31--------------------------- 0
812 * | |<- shift ----------------|
813 * 63-------------------------- 31--------------------------- 0
820 struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed); in rapl_read_data_raw()
823 if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY) in rapl_read_data_raw()
824 return -EINVAL; in rapl_read_data_raw()
826 ra.reg = rd->regs[rpi->id]; in rapl_read_data_raw()
828 return -EINVAL; in rapl_read_data_raw()
830 /* non-hardware data are collected by the polling thread */ in rapl_read_data_raw()
831 if (rpi->flag & RAPL_PRIMITIVE_DERIVED) { in rapl_read_data_raw()
832 *data = rd->rdd.primitives[prim]; in rapl_read_data_raw()
836 ra.mask = rpi->mask; in rapl_read_data_raw()
838 if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { in rapl_read_data_raw()
839 pr_debug("failed to read reg 0x%llx for %s:%s\n", ra.reg.val, rd->rp->name, rd->name); in rapl_read_data_raw()
840 return -EIO; in rapl_read_data_raw()
843 value = ra.value >> rpi->shift; in rapl_read_data_raw()
846 *data = rapl_unit_xlate(rd, rpi->unit, value, 0); in rapl_read_data_raw()
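The layout comment and the rapl_read_data_raw() lines above show how each primitive is isolated from a shared register with a per-primitive mask and shift before being translated into physical units. A minimal standalone sketch of that mask-and-shift step, using made-up field values rather than the driver's real rapl_primitive_info tables:

/* Minimal sketch of the mask-and-shift extraction described above.
 * The mask, shift and unit values below are hypothetical examples,
 * not the driver's actual rapl_primitive_info entries.
 */
#include <stdint.h>
#include <stdio.h>

struct prim_info {
        uint64_t mask;   /* bits occupied by the primitive in the register */
        int      shift;  /* position of the field's least significant bit  */
        double   unit;   /* physical value represented by one raw count    */
};

static double read_primitive(uint64_t reg, const struct prim_info *pi)
{
        uint64_t raw = (reg & pi->mask) >> pi->shift;

        return raw * pi->unit;       /* translate raw counts into units */
}

int main(void)
{
        /* e.g. a 15-bit power-limit field at bits 14:0, 0.125 W per count */
        struct prim_info pl1 = { .mask = 0x7fff, .shift = 0, .unit = 0.125 };
        uint64_t reg = 0x00dd8320;   /* made-up register value */

        printf("PL1 = %.3f W\n", read_primitive(reg, &pl1));
        return 0;
}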
859 struct rapl_primitive_info *rpi = get_rpi(rd->rp, prim_fixed); in rapl_write_data_raw()
864 if (!rpi || !rpi->name || rpi->flag & RAPL_PRIMITIVE_DUMMY) in rapl_write_data_raw()
865 return -EINVAL; in rapl_write_data_raw()
867 bits = rapl_unit_xlate(rd, rpi->unit, value, 1); in rapl_write_data_raw()
868 bits <<= rpi->shift; in rapl_write_data_raw()
869 bits &= rpi->mask; in rapl_write_data_raw()
873 ra.reg = rd->regs[rpi->id]; in rapl_write_data_raw()
874 ra.mask = rpi->mask; in rapl_write_data_raw()
877 ret = rd->rp->priv->write_raw(get_rid(rd->rp), &ra); in rapl_write_data_raw()
888 return -EINVAL; in rapl_read_pl_data()
900 return -EINVAL; in rapl_write_pl_data()
902 if (rd->rpl[pl].locked) { in rapl_write_pl_data()
903 pr_debug("%s:%s:%s locked by BIOS\n", rd->rp->name, rd->name, pl_names[pl]); in rapl_write_pl_data()
904 return -EACCES; in rapl_write_pl_data()
925 ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT]; in rapl_check_unit_core()
927 if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { in rapl_check_unit_core()
929 ra.reg.val, rd->rp->name, rd->name); in rapl_check_unit_core()
930 return -ENODEV; in rapl_check_unit_core()
934 rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); in rapl_check_unit_core()
937 rd->power_unit = 1000000 / (1 << value); in rapl_check_unit_core()
940 rd->time_unit = 1000000 / (1 << value); in rapl_check_unit_core()
943 rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit); in rapl_check_unit_core()
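rapl_check_unit_core() above turns the exponent fields of the RAPL unit register into micro-unit step sizes via 1000000 / (1 << value). Ignoring the driver's extra ENERGY_UNIT_SCALE factor, the same divisions can be worked through with exponents that are common on Intel parts; the specific values below (16, 3, 10) are an assumption here, real ones come from hardware:

/* Worked example of the 1/2^N unit conversions done in rapl_check_unit_core().
 * The exponents below are illustrative; real systems report them in the
 * RAPL unit register.
 */
#include <stdio.h>

int main(void)
{
        unsigned int energy_exp = 16, power_exp = 3, time_exp = 10;

        /* microjoules, microwatts and microseconds per raw count */
        double energy_unit_uj = 1000000.0 / (1 << energy_exp); /* ~15.26 uJ */
        double power_unit_uw  = 1000000.0 / (1 << power_exp);  /* 125000 uW */
        double time_unit_us   = 1000000.0 / (1 << time_exp);   /* ~976.6 us */

        printf("energy %.2f uJ, power %.0f uW, time %.2f us per count\n",
               energy_unit_uj, power_unit_uw, time_unit_us);
        return 0;
}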
953 ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT]; in rapl_check_unit_atom()
955 if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { in rapl_check_unit_atom()
957 ra.reg.val, rd->rp->name, rd->name); in rapl_check_unit_atom()
958 return -ENODEV; in rapl_check_unit_atom()
962 rd->energy_unit = ENERGY_UNIT_SCALE * 1 << value; in rapl_check_unit_atom()
965 rd->power_unit = (1 << value) * 1000; in rapl_check_unit_atom()
968 rd->time_unit = 1000000 / (1 << value); in rapl_check_unit_atom()
971 rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit); in rapl_check_unit_atom()
983 if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) { in power_limit_irq_save_cpu()
984 rp->power_limit_irq = l & PACKAGE_THERM_INT_PLN_ENABLE; in power_limit_irq_save_cpu()
985 rp->power_limit_irq |= PACKAGE_PLN_INT_SAVED; in power_limit_irq_save_cpu()
992 * When package power limit is set artificially low by RAPL, LVT
993 * thermal interrupt for package power limit should be ignored
1003 if (rp->lead_cpu < 0) in package_power_limit_irq_save()
1009 smp_call_function_single(rp->lead_cpu, power_limit_irq_save_cpu, rp, 1); in package_power_limit_irq_save()
1013 * Restore the per-package power limit interrupt enable state. Called from CPU
1014 * hotplug code on package removal.
1020 if (rp->lead_cpu < 0) in package_power_limit_irq_restore()
1027 if (!(rp->power_limit_irq & PACKAGE_PLN_INT_SAVED)) in package_power_limit_irq_restore()
1032 if (rp->power_limit_irq & PACKAGE_THERM_INT_PLN_ENABLE) in package_power_limit_irq_restore()
1040 static void set_floor_freq_default(struct rapl_domain *rd, bool mode) in set_floor_freq_default() argument
1044 /* always enable clamp such that p-state can go below OS requested in set_floor_freq_default()
1047 rapl_write_pl_data(rd, POWER_LIMIT1, PL_CLAMP, mode); in set_floor_freq_default()
1050 rapl_write_pl_data(rd, i, PL_ENABLE, mode); in set_floor_freq_default()
1051 rapl_write_pl_data(rd, i, PL_CLAMP, mode); in set_floor_freq_default()
1058 struct rapl_defaults *defaults = get_defaults(rd->rp); in set_floor_freq_atom()
1061 if (!defaults->floor_freq_reg_addr) { in set_floor_freq_atom()
1068 defaults->floor_freq_reg_addr, in set_floor_freq_atom()
1076 defaults->floor_freq_reg_addr, mdata); in set_floor_freq_atom()
1091 value = (1 << y) * (4 + f) * rd->time_unit / 4; in rapl_compute_time_window_core()
1093 if (value < rd->time_unit) in rapl_compute_time_window_core()
1096 do_div(value, rd->time_unit); in rapl_compute_time_window_core()
1106 f = div64_u64(4 * (value - (1ULL << y)), 1ULL << y); in rapl_compute_time_window_core()
1120 return (value) ? value * rd->time_unit : rd->time_unit; in rapl_compute_time_window_atom()
1122 value = div64_u64(value, rd->time_unit); in rapl_compute_time_window_atom()
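The rapl_compute_time_window_core() lines above encode the RAPL time-window formula 2^Y * (1 + F/4) * time_unit, where Y and F are small bit fields of the power-limit register. A standalone sketch of the same arithmetic, with illustrative Y/F/time-unit values rather than hardware-read ones:

/* Sketch of the time-window math used by rapl_compute_time_window_core():
 * window = 2^Y * (1 + F/4) * time_unit.  Y, F and the time unit below are
 * illustrative values, not read from hardware.
 */
#include <stdio.h>

static unsigned long long window_from_fields(unsigned int y, unsigned int f,
                                             unsigned long long time_unit_us)
{
        return (1ULL << y) * (4 + f) * time_unit_us / 4;
}

int main(void)
{
        /* time unit of ~977 us (2^-10 s), Y = 4, F = 2 */
        unsigned long long w = window_from_fields(4, 2, 977);

        printf("time window = %llu us (~%.3f s)\n", w, w / 1000000.0);
        return 0;
}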
1140 ra.reg = rd->regs[RAPL_DOMAIN_REG_UNIT]; in rapl_check_unit_tpmi()
1142 if (rd->rp->priv->read_raw(get_rid(rd->rp), &ra)) { in rapl_check_unit_tpmi()
1144 ra.reg.val, rd->rp->name, rd->name); in rapl_check_unit_tpmi()
1145 return -ENODEV; in rapl_check_unit_tpmi()
1149 rd->energy_unit = ENERGY_UNIT_SCALE * 1000000 / (1 << value); in rapl_check_unit_tpmi()
1152 rd->power_unit = 1000000 / (1 << value); in rapl_check_unit_tpmi()
1155 rd->time_unit = 1000000 / (1 << value); in rapl_check_unit_tpmi()
1158 rd->rp->name, rd->name, rd->energy_unit, rd->time_unit, rd->power_unit); in rapl_check_unit_tpmi()
1301 for (dmn = 0; dmn < rp->nr_domains; dmn++) { in rapl_update_domain_data()
1302 pr_debug("update %s domain %s data\n", rp->name, in rapl_update_domain_data()
1303 rp->domains[dmn].name); in rapl_update_domain_data()
1304 /* exclude non-raw primitives */ in rapl_update_domain_data()
1308 if (!rapl_read_data_raw(&rp->domains[dmn], prim, in rapl_update_domain_data()
1309 rpi->unit, &val)) in rapl_update_domain_data()
1310 rp->domains[dmn].rdd.primitives[prim] = val; in rapl_update_domain_data()
1322 /* Update the domain data of the new package */ in rapl_package_register_powercap()
1325 /* first we register package domain as the parent zone */ in rapl_package_register_powercap()
1326 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { in rapl_package_register_powercap()
1327 if (rd->id == RAPL_DOMAIN_PACKAGE) { in rapl_package_register_powercap()
1329 pr_debug("register package domain %s\n", rp->name); in rapl_package_register_powercap()
1330 power_zone = powercap_register_zone(&rd->power_zone, in rapl_package_register_powercap()
1331 rp->priv->control_type, rp->name, in rapl_package_register_powercap()
1332 NULL, &zone_ops[rd->id], nr_pl, in rapl_package_register_powercap()
1336 rp->name); in rapl_package_register_powercap()
1339 /* track parent zone in per package/socket data */ in rapl_package_register_powercap()
1340 rp->power_zone = power_zone; in rapl_package_register_powercap()
1341 /* done, only one package domain per socket */ in rapl_package_register_powercap()
1346 pr_err("no package domain found, unknown topology!\n"); in rapl_package_register_powercap()
1347 return -ENODEV; in rapl_package_register_powercap()
1349 /* now register domains as children of the socket/package */ in rapl_package_register_powercap()
1350 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { in rapl_package_register_powercap()
1351 struct powercap_zone *parent = rp->power_zone; in rapl_package_register_powercap()
1353 if (rd->id == RAPL_DOMAIN_PACKAGE) in rapl_package_register_powercap()
1355 if (rd->id == RAPL_DOMAIN_PLATFORM) in rapl_package_register_powercap()
1359 power_zone = powercap_register_zone(&rd->power_zone, in rapl_package_register_powercap()
1360 rp->priv->control_type, in rapl_package_register_powercap()
1361 rd->name, parent, in rapl_package_register_powercap()
1362 &zone_ops[rd->id], nr_pl, in rapl_package_register_powercap()
1367 rp->name, rd->name); in rapl_package_register_powercap()
1376 * Clean up previously initialized domains within the package if we in rapl_package_register_powercap()
1379 while (--rd >= rp->domains) { in rapl_package_register_powercap()
1380 pr_debug("unregister %s domain %s\n", rp->name, rd->name); in rapl_package_register_powercap()
1381 powercap_unregister_zone(rp->priv->control_type, in rapl_package_register_powercap()
1382 &rd->power_zone); in rapl_package_register_powercap()
1398 ra.reg = rp->priv->regs[domain][RAPL_DOMAIN_REG_STATUS]; in rapl_check_domain()
1402 return -EINVAL; in rapl_check_domain()
1404 /* make sure domain counters are available and contain non-zero in rapl_check_domain()
1409 if (rp->priv->read_raw(get_rid(rp), &ra) || !ra.value) in rapl_check_domain()
1410 return -ENODEV; in rapl_check_domain()
1417 * RAPL interfaces without a per-domain unit register will use the package
1422 struct rapl_defaults *defaults = get_defaults(rd->rp); in rapl_get_domain_unit()
1425 if (!rd->regs[RAPL_DOMAIN_REG_UNIT].val) { in rapl_get_domain_unit()
1426 if (!rd->rp->priv->reg_unit.val) { in rapl_get_domain_unit()
1428 return -ENODEV; in rapl_get_domain_unit()
1430 rd->regs[RAPL_DOMAIN_REG_UNIT] = rd->rp->priv->reg_unit; in rapl_get_domain_unit()
1433 if (!defaults->check_unit) { in rapl_get_domain_unit()
1435 return -ENODEV; in rapl_get_domain_unit()
1438 ret = defaults->check_unit(rd); in rapl_get_domain_unit()
1442 if (rd->id == RAPL_DOMAIN_DRAM && defaults->dram_domain_energy_unit) in rapl_get_domain_unit()
1443 rd->energy_unit = defaults->dram_domain_energy_unit; in rapl_get_domain_unit()
1444 if (rd->id == RAPL_DOMAIN_PLATFORM && defaults->psys_domain_energy_unit) in rapl_get_domain_unit()
1445 rd->energy_unit = defaults->psys_domain_energy_unit; in rapl_get_domain_unit()
1451 * 1. Locked by BIOS; in this case we still provide read-only access so that
1466 rd->rpl[i].locked = true; in rapl_detect_powerlimit()
1468 rd->rp->name, rd->name, pl_names[i]); in rapl_detect_powerlimit()
1473 rd->rpl[i].name = NULL; in rapl_detect_powerlimit()
1478 * ensure the CPU belongs to the targeted package and CPU hotplug is disabled.
1486 /* use physical package id to read counters */ in rapl_detect_domains()
1488 rp->domain_map |= 1 << i; in rapl_detect_domains()
1492 rp->nr_domains = bitmap_weight(&rp->domain_map, RAPL_DOMAIN_MAX); in rapl_detect_domains()
1493 if (!rp->nr_domains) { in rapl_detect_domains()
1494 pr_debug("no valid rapl domains found in %s\n", rp->name); in rapl_detect_domains()
1495 return -ENODEV; in rapl_detect_domains()
1497 pr_debug("found %d domains on %s\n", rp->nr_domains, rp->name); in rapl_detect_domains()
1499 rp->domains = kcalloc(rp->nr_domains, sizeof(struct rapl_domain), in rapl_detect_domains()
1501 if (!rp->domains) in rapl_detect_domains()
1502 return -ENOMEM; in rapl_detect_domains()
1506 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { in rapl_detect_domains()
1537 * 2.3283064365386962890625e-10
1539 * energy units. Use 2.3283064365386962890625e-10 (2^-32) Joules as
1551 * 1. PMU is registered only if it is needed by a RAPL Package. PMU events for
1553 * 2. PMU is unregistered and registered when a new RAPL Package is probed and
1573 if (!rp->has_pmu) in get_pmu_cpu()
1577 if (rp->priv->type != RAPL_IF_TPMI) in get_pmu_cpu()
1580 /* TPMI RAPL uses any CPU in the package for PMU */ in get_pmu_cpu()
1582 if (topology_physical_package_id(cpu) == rp->id) in get_pmu_cpu()
1590 if (!rp->has_pmu) in is_rp_pmu_cpu()
1594 if (rp->priv->type != RAPL_IF_TPMI) in is_rp_pmu_cpu()
1597 /* TPMI RAPL uses any CPU in the package for PMU */ in is_rp_pmu_cpu()
1598 return topology_physical_package_id(cpu) == rp->id; in is_rp_pmu_cpu()
1603 struct rapl_package *rp = event->pmu_private; in event_to_pmu_data()
1605 return &rp->pmu_data; in event_to_pmu_data()
1612 struct rapl_package *rp = event->pmu_private; in event_read_counter()
1617 if (event->hw.idx < 0) in event_read_counter()
1620 ret = rapl_read_data_raw(&rp->domains[event->hw.idx], ENERGY_COUNTER, false, &val); in event_read_counter()
1633 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED))) in __rapl_pmu_event_start()
1636 event->hw.state = 0; in __rapl_pmu_event_start()
1638 list_add_tail(&event->active_entry, &data->active_list); in __rapl_pmu_event_start()
1640 local64_set(&event->hw.prev_count, event_read_counter(event)); in __rapl_pmu_event_start()
1641 if (++data->n_active == 1) in __rapl_pmu_event_start()
1642 hrtimer_start(&data->hrtimer, data->timer_interval, in __rapl_pmu_event_start()
1646 static void rapl_pmu_event_start(struct perf_event *event, int mode) in rapl_pmu_event_start() argument
1651 raw_spin_lock_irqsave(&data->lock, flags); in rapl_pmu_event_start()
1653 raw_spin_unlock_irqrestore(&data->lock, flags); in rapl_pmu_event_start()
1658 struct hw_perf_event *hwc = &event->hw; in rapl_event_update()
1664 * Follow the generic code to drain hwc->prev_count. in rapl_event_update()
1667 prev_raw_count = local64_read(&hwc->prev_count); in rapl_event_update()
1670 } while (!local64_try_cmpxchg(&hwc->prev_count, in rapl_event_update()
1677 * (event-)time and add that to the generic event. in rapl_event_update()
1679 delta = new_raw_count - prev_raw_count; in rapl_event_update()
1682 * Scale delta to smallest unit (2^-32) in rapl_event_update()
1684 * or use ldexp(count, -32). in rapl_event_update()
1687 sdelta = delta * data->scale[event->hw.flags]; in rapl_event_update()
1689 local64_add(sdelta, &event->count); in rapl_event_update()
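The scaling step above multiplies the raw counter delta by a per-domain factor expressed in 2^-32 Joule units, which is equivalent to the ldexp(count, -32) form mentioned in the comment. A minimal sketch of that conversion with an assumed scale factor (not a value read from the unit register):

/* Sketch of the 2^-32 Joule fixed-point scaling used when updating a RAPL
 * perf event.  The scale value below (2^-14 J per count, expressed in
 * 2^-32 J units) is an assumed example.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t delta = 16384;           /* raw counter increase            */
        uint64_t scale = 1ULL << 18;      /* 2^-14 J per count, in 2^-32 J   */

        /* fixed-point product, still in 2^-32 Joule units */
        uint64_t sdelta = delta * scale;

        /* ldexp(x, -32) divides by 2^32, yielding Joules */
        double joules = ldexp((double)sdelta, -32);

        printf("delta %llu counts -> %.6f J\n",
               (unsigned long long)delta, joules);
        return 0;
}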
1694 static void rapl_pmu_event_stop(struct perf_event *event, int mode) in rapl_pmu_event_stop() argument
1697 struct hw_perf_event *hwc = &event->hw; in rapl_pmu_event_stop()
1700 raw_spin_lock_irqsave(&data->lock, flags); in rapl_pmu_event_stop()
1703 if (!(hwc->state & PERF_HES_STOPPED)) { in rapl_pmu_event_stop()
1704 WARN_ON_ONCE(data->n_active <= 0); in rapl_pmu_event_stop()
1705 if (--data->n_active == 0) in rapl_pmu_event_stop()
1706 hrtimer_cancel(&data->hrtimer); in rapl_pmu_event_stop()
1708 list_del(&event->active_entry); in rapl_pmu_event_stop()
1710 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in rapl_pmu_event_stop()
1711 hwc->state |= PERF_HES_STOPPED; in rapl_pmu_event_stop()
1715 if ((mode & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) { in rapl_pmu_event_stop()
1721 hwc->state |= PERF_HES_UPTODATE; in rapl_pmu_event_stop()
1724 raw_spin_unlock_irqrestore(&data->lock, flags); in rapl_pmu_event_stop()
1727 static int rapl_pmu_event_add(struct perf_event *event, int mode) in rapl_pmu_event_add() argument
1730 struct hw_perf_event *hwc = &event->hw; in rapl_pmu_event_add()
1733 raw_spin_lock_irqsave(&data->lock, flags); in rapl_pmu_event_add()
1735 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in rapl_pmu_event_add()
1737 if (mode & PERF_EF_START) in rapl_pmu_event_add()
1740 raw_spin_unlock_irqrestore(&data->lock, flags); in rapl_pmu_event_add()
1753 PERF_RAPL_PKG, /* entire package */
1772 u64 cfg = event->attr.config & RAPL_EVENT_MASK; in rapl_pmu_event_init()
1776 if (event->attr.type != event->pmu->type) in rapl_pmu_event_init()
1777 return -ENOENT; in rapl_pmu_event_init()
1781 return -EINVAL; in rapl_pmu_event_init()
1783 if (event->cpu < 0) in rapl_pmu_event_init()
1784 return -EINVAL; in rapl_pmu_event_init()
1786 /* Find out which Package the event belongs to */ in rapl_pmu_event_init()
1788 if (is_rp_pmu_cpu(pos, event->cpu)) { in rapl_pmu_event_init()
1794 return -ENODEV; in rapl_pmu_event_init()
1799 event->event_caps |= PERF_EV_CAP_READ_ACTIVE_PKG; in rapl_pmu_event_init()
1800 event->pmu_private = rp; /* Which package */ in rapl_pmu_event_init()
1801 event->hw.flags = domain; /* Which domain */ in rapl_pmu_event_init()
1803 event->hw.idx = -1; in rapl_pmu_event_init()
1804 /* Find out the index in rp->domains[] to get domain pointer */ in rapl_pmu_event_init()
1805 for (idx = 0; idx < rp->nr_domains; idx++) { in rapl_pmu_event_init()
1806 if (rp->domains[idx].id == domain) { in rapl_pmu_event_init()
1807 event->hw.idx = idx; in rapl_pmu_event_init()
1827 if (!data->n_active) in rapl_hrtimer_handle()
1830 raw_spin_lock_irqsave(&data->lock, flags); in rapl_hrtimer_handle()
1832 list_for_each_entry(event, &data->active_list, active_entry) in rapl_hrtimer_handle()
1835 raw_spin_unlock_irqrestore(&data->lock, flags); in rapl_hrtimer_handle()
1837 hrtimer_forward_now(hrtimer, data->timer_interval); in rapl_hrtimer_handle()
1866 return -ENOMEM; in cpumask_show()
1872 /* Choose a cpu for each RAPL Package */ in cpumask_show()
1898 PMU_FORMAT_ATTR(event, "config:0-7");
1922 RAPL_EVENT_ATTR_STR(energy-cores, rapl_cores, "event=0x01");
1923 RAPL_EVENT_ATTR_STR(energy-pkg, rapl_pkg, "event=0x02");
1924 RAPL_EVENT_ATTR_STR(energy-ram, rapl_ram, "event=0x03");
1925 RAPL_EVENT_ATTR_STR(energy-gpu, rapl_gpu, "event=0x04");
1926 RAPL_EVENT_ATTR_STR(energy-psys, rapl_psys, "event=0x05");
1928 RAPL_EVENT_ATTR_STR(energy-cores.unit, rapl_unit_cores, "Joules");
1929 RAPL_EVENT_ATTR_STR(energy-pkg.unit, rapl_unit_pkg, "Joules");
1930 RAPL_EVENT_ATTR_STR(energy-ram.unit, rapl_unit_ram, "Joules");
1931 RAPL_EVENT_ATTR_STR(energy-gpu.unit, rapl_unit_gpu, "Joules");
1932 RAPL_EVENT_ATTR_STR(energy-psys.unit, rapl_unit_psys, "Joules");
1934 RAPL_EVENT_ATTR_STR(energy-cores.scale, rapl_scale_cores, "2.3283064365386962890625e-10");
1935 RAPL_EVENT_ATTR_STR(energy-pkg.scale, rapl_scale_pkg, "2.3283064365386962890625e-10");
1936 RAPL_EVENT_ATTR_STR(energy-ram.scale, rapl_scale_ram, "2.3283064365386962890625e-10");
1937 RAPL_EVENT_ATTR_STR(energy-gpu.scale, rapl_scale_gpu, "2.3283064365386962890625e-10");
1938 RAPL_EVENT_ATTR_STR(energy-psys.scale, rapl_scale_psys, "2.3283064365386962890625e-10");
1949 return rapl_pmu.domain_map & BIT(domain) ? attr->mode : 0; \
1976 /* Return if PMU already covers all events supported by current RAPL Package */ in rapl_pmu_update()
1977 if (rapl_pmu.registered && !(rp->domain_map & (~rapl_pmu.domain_map))) in rapl_pmu_update()
1985 rapl_pmu.domain_map |= rp->domain_map; in rapl_pmu_update()
1999 ret = perf_pmu_register(&rapl_pmu.pmu, "power", -1); in rapl_pmu_update()
2007 rp->has_pmu = true; in rapl_pmu_update()
2013 struct rapl_package_pmu_data *data = &rp->pmu_data; in rapl_package_add_pmu()
2016 if (rp->has_pmu) in rapl_package_add_pmu()
2017 return -EEXIST; in rapl_package_add_pmu()
2021 for (idx = 0; idx < rp->nr_domains; idx++) { in rapl_package_add_pmu()
2022 struct rapl_domain *rd = &rp->domains[idx]; in rapl_package_add_pmu()
2023 int domain = rd->id; in rapl_package_add_pmu()
2026 if (!test_bit(domain, &rp->domain_map)) in rapl_package_add_pmu()
2030 * The RAPL PMU granularity is 2^-32 Joules in rapl_package_add_pmu()
2031 * data->scale[]: number of 2^-32 Joule units per ENERGY_COUNTER increase in rapl_package_add_pmu()
2033 val = rd->energy_unit * (1ULL << 32); in rapl_package_add_pmu()
2035 data->scale[domain] = val; in rapl_package_add_pmu()
2045 * max_count = (rpi->mask >> rpi->shift) + 1 in rapl_package_add_pmu()
2046 * max_energy_pj = max_count * rd->energy_unit in rapl_package_add_pmu()
2051 val = (rpi->mask >> rpi->shift) + 1; in rapl_package_add_pmu()
2052 val *= rd->energy_unit; in rapl_package_add_pmu()
2059 pr_debug("Domain %s: hw unit %lld * 2^-32 Joules\n", rd->name, data->scale[domain]); in rapl_package_add_pmu()
2062 /* Initialize per package PMU data */ in rapl_package_add_pmu()
2063 raw_spin_lock_init(&data->lock); in rapl_package_add_pmu()
2064 INIT_LIST_HEAD(&data->active_list); in rapl_package_add_pmu()
2065 data->timer_interval = ms_to_ktime(rapl_pmu.timer_ms); in rapl_package_add_pmu()
2066 hrtimer_init(&data->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in rapl_package_add_pmu()
2067 data->hrtimer.function = rapl_hrtimer_handle; in rapl_package_add_pmu()
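The max_count / max_energy comment above sizes the polling interval so the hardware energy counter is read before it wraps. A rough worked example of that wrap-time estimate; the 32-bit counter width, 2^-14 J unit and 500 W draw are assumptions, not values taken from the driver:

/* Rough wrap-time estimate behind the polling-interval comment above.
 * Counter width, energy unit and power draw are assumed example values.
 */
#include <stdio.h>

int main(void)
{
        unsigned long long max_count = 1ULL << 32;  /* 32-bit energy counter */
        double energy_unit_j = 1.0 / (1 << 14);     /* 2^-14 J per count     */
        double max_power_w   = 500.0;               /* assumed worst case    */

        double max_energy_j = max_count * energy_unit_j;     /* ~262144 J  */
        double wrap_seconds = max_energy_j / max_power_w;    /* ~524 s     */

        printf("counter wraps after ~%.0f J, i.e. ~%.0f s at %.0f W\n",
               max_energy_j, wrap_seconds, max_power_w);
        return 0;
}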
2077 if (!rp->has_pmu) in rapl_package_remove_pmu()
2084 if (pos->has_pmu && pos != rp) in rapl_package_remove_pmu()
2101 for (rd = rp->domains; rd < rp->domains + rp->nr_domains; rd++) { in rapl_remove_package_cpuslocked()
2109 if (rd->id == RAPL_DOMAIN_PACKAGE) { in rapl_remove_package_cpuslocked()
2113 pr_debug("remove package, undo power limit on %s: %s\n", in rapl_remove_package_cpuslocked()
2114 rp->name, rd->name); in rapl_remove_package_cpuslocked()
2115 powercap_unregister_zone(rp->priv->control_type, in rapl_remove_package_cpuslocked()
2116 &rd->power_zone); in rapl_remove_package_cpuslocked()
2119 powercap_unregister_zone(rp->priv->control_type, in rapl_remove_package_cpuslocked()
2120 &rd_package->power_zone); in rapl_remove_package_cpuslocked()
2121 list_del(&rp->plist); in rapl_remove_package_cpuslocked()
2134 * RAPL Package energy counter scope:
2135 * 1. AMD/HYGON platforms use per-PKG package energy counter
2137 * 2.1 CLX-AP platform has per-DIE package energy counter
2139 * the package energy counter can be considered either per-PKG or per-DIE;
2140 * here it is treated as per-DIE.
2159 pr_err("topology_logical_(package/die)_id() returned a negative value"); in rapl_find_package_domain_cpuslocked()
2167 if (rp->id == uid in rapl_find_package_domain_cpuslocked()
2168 && rp->priv->control_type == priv->control_type) in rapl_find_package_domain_cpuslocked()
2191 return ERR_PTR(-ENOMEM); in rapl_add_package_cpuslocked()
2194 rp->id = rapl_msrs_are_pkg_scope() ? in rapl_add_package_cpuslocked()
2196 if ((int)(rp->id) < 0) { in rapl_add_package_cpuslocked()
2197 pr_err("topology_logical_(package/die)_id() returned a negative value"); in rapl_add_package_cpuslocked()
2198 return ERR_PTR(-EINVAL); in rapl_add_package_cpuslocked()
2200 rp->lead_cpu = id; in rapl_add_package_cpuslocked()
2202 snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d-die-%d", in rapl_add_package_cpuslocked()
2205 snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", in rapl_add_package_cpuslocked()
2208 rp->id = id; in rapl_add_package_cpuslocked()
2209 rp->lead_cpu = -1; in rapl_add_package_cpuslocked()
2210 snprintf(rp->name, PACKAGE_DOMAIN_NAME_LENGTH, "package-%d", id); in rapl_add_package_cpuslocked()
2213 rp->priv = priv; in rapl_add_package_cpuslocked()
2218 /* check if the package contains valid domains */ in rapl_add_package_cpuslocked()
2220 ret = -ENODEV; in rapl_add_package_cpuslocked()
2225 INIT_LIST_HEAD(&rp->plist); in rapl_add_package_cpuslocked()
2226 list_add(&rp->plist, &rapl_packages); in rapl_add_package_cpuslocked()
2231 kfree(rp->domains); in rapl_add_package_cpuslocked()
2252 if (!rp->power_zone) in power_limit_state_save()
2254 rd = power_zone_to_rapl_domain(rp->power_zone); in power_limit_state_save()
2257 &rd->rpl[i].last_power_limit); in power_limit_state_save()
2259 rd->rpl[i].last_power_limit = 0; in power_limit_state_save()
2273 if (!rp->power_zone) in power_limit_state_restore()
2275 rd = power_zone_to_rapl_domain(rp->power_zone); in power_limit_state_restore()
2277 if (rd->rpl[i].last_power_limit) in power_limit_state_restore()
2279 rd->rpl[i].last_power_limit); in power_limit_state_restore()
2285 unsigned long mode, void *_unused) in rapl_pm_callback() argument
2287 switch (mode) { in rapl_pm_callback()
2311 defaults_msr = (struct rapl_defaults *)id->driver_data; in rapl_init()
2315 return -ENOMEM; in rapl_init()