Lines matching +full:pressure +full:- +full:max

Every match below is a fragment of the kernel's arch_topology code (drivers/base/arch_topology.c). The number at the start of each match is its line number in that source file, and the trailing "in foo()" names the enclosing function.

1 // SPDX-License-Identifier: GPL-2.0
81 if (!sfd || sfd->source != SCALE_FREQ_SOURCE_ARCH) { in topology_set_scale_freq_source()
104 if (sfd && sfd->source == source) { in topology_clear_scale_freq_source()
114 * use-after-free races. in topology_clear_scale_freq_source()
127 sfd->set_freq_scale(); in topology_scale_freq_tick()
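The matches from topology_set_scale_freq_source(), topology_clear_scale_freq_source() and topology_scale_freq_tick() (source lines 81-127) revolve around a per-CPU scale-frequency hook: a small struct whose set_freq_scale() callback runs from the tick, installed only when no arch-provided source already owns the slot (line 81) and cleared only by the source that registered it (line 104). A minimal sketch of that shape, with a plain pointer standing in for the per-CPU variable and the removal synchronization mentioned at line 114 omitted; the layout matches what the calls above imply, but any enum value other than SCALE_FREQ_SOURCE_ARCH is an assumption:

enum scale_freq_source {
	SCALE_FREQ_SOURCE_CPUFREQ,	/* assumed additional source */
	SCALE_FREQ_SOURCE_ARCH,		/* seen at line 81 */
};

struct scale_freq_data {
	enum scale_freq_source source;	/* who installed the hook */
	void (*set_freq_scale)(void);	/* invoked from the tick path */
};

static struct scale_freq_data *sfd_slot;	/* stand-in for the per-CPU pointer */

static void scale_freq_tick_sketch(void)
{
	struct scale_freq_data *sfd = sfd_slot;

	if (sfd)
		sfd->set_freq_scale();		/* as at line 127 */
}

static void clear_scale_freq_source_sketch(enum scale_freq_source source)
{
	struct scale_freq_data *sfd = sfd_slot;

	/* only the registering source may remove its hook, as at line 104 */
	if (sfd && sfd->source == source)
		sfd_slot = NULL;
}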
167 * topology_update_hw_pressure() - Update HW pressure for CPUs
171 * Update the value of HW pressure for all @cpus in the mask. The
173 * operating on stale data when hot-plug is used for some CPUs. The
174 * @capped_freq reflects the currently allowed max CPUs frequency due to
176 * than the internal 'capacity_freq_ref' max frequency. In such case the
177 * pressure value should simply be removed, since this is an indication that
183 unsigned long max_capacity, capacity, pressure; in topology_update_hw_pressure() local
193 * the HW pressure value. in topology_update_hw_pressure()
200 pressure = max_capacity - capacity; in topology_update_hw_pressure()
202 trace_hw_pressure_update(cpu, pressure); in topology_update_hw_pressure()
205 WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure); in topology_update_hw_pressure()
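The kernel-doc excerpt and the arithmetic above (source lines 167-205) describe the whole HW pressure update: scale the CPU's maximum capacity by the ratio of the capped frequency to the reference maximum, and record the shortfall as the pressure; a capped frequency at or above the reference (a boost value) clears the pressure entirely, per the comment at line 193. A standalone sketch of that calculation in plain C; the real code uses a mult_frac()-style helper to avoid overflow, which this simple multiply and divide glosses over:

static unsigned long hw_pressure_for(unsigned long max_capacity,
				     unsigned long max_freq,
				     unsigned long capped_freq)
{
	unsigned long capacity;

	if (capped_freq >= max_freq)		/* boost cap: no throttling */
		capacity = max_capacity;
	else
		capacity = max_capacity * capped_freq / max_freq;

	return max_capacity - capacity;		/* line 200: pressure = max_capacity - capacity */
}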
215 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
228 return -ENOENT; in cpu_capacity_sysctl_add()
240 return -ENOENT; in cpu_capacity_sysctl_remove()
249 cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity", in register_cpu_capacity_sysctl()
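Source lines 215-249 cover the sysfs plumbing: cpu_capacity_show() emits topology_get_cpu_scale() for the CPU, and the attribute is added and removed as CPUs come and go through a dynamic hotplug state. A hedged sketch of that registration pattern; the callback bodies here are stubs (the real ones create or remove the device attribute and return -ENOENT when the CPU device is missing, per lines 228 and 240):

static int cpu_capacity_add_sketch(unsigned int cpu)
{
	/* device_create_file(get_cpu_device(cpu), ...) in the real code */
	return 0;
}

static int cpu_capacity_remove_sketch(unsigned int cpu)
{
	/* device_remove_file(get_cpu_device(cpu), ...) in the real code */
	return 0;
}

static int register_cpu_capacity_sysctl_sketch(void)
{
	/* returns the dynamically allocated hotplug state or a negative errno */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "topology/cpu-capacity",
				 cpu_capacity_add_sketch,
				 cpu_capacity_remove_sketch);
}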
297 capacity_scale = max(capacity, capacity_scale); in topology_normalize_cpu_scale()
321 ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", in topology_parse_cpu_capacity()
339 * For non-clk CPU DVFS mechanism, there's no way to get the in topology_parse_cpu_capacity()
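Source lines 297-339 handle raw capacities: topology_parse_cpu_capacity() reads the optional capacity-dmips-mhz property from each CPU's DT node, and topology_normalize_cpu_scale() rescales every CPU against the largest capacity times frequency product so the biggest CPU lands at SCHED_CAPACITY_SCALE (1024). A standalone sketch of that normalization with plain arrays standing in for the kernel's per-CPU data; the function and parameter names are illustrative only:

#define SCHED_CAPACITY_SHIFT	10	/* full capacity == 1024 */

static void normalize_cpu_scale_sketch(const unsigned long *raw_capacity,
				       const unsigned long *freq_ref,
				       unsigned long *out, int nr_cpus)
{
	unsigned long long capacity, capacity_scale = 1;
	int cpu;

	/* first pass: find the largest raw-capacity * reference-frequency
	 * product, as at line 297 */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		capacity = (unsigned long long)raw_capacity[cpu] * freq_ref[cpu];
		if (capacity > capacity_scale)
			capacity_scale = capacity;
	}

	/* second pass: express each CPU relative to the largest, 0..1024 */
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		capacity = (unsigned long long)raw_capacity[cpu] * freq_ref[cpu];
		out[cpu] = (unsigned long)((capacity << SCHED_CAPACITY_SHIFT) /
					   capacity_scale);
	}
}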
443 cpumask_pr_args(policy->related_cpus), in init_cpu_capacity_callback()
446 cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus); in init_cpu_capacity_callback()
448 for_each_cpu(cpu, policy->related_cpus) { in init_cpu_capacity_callback()
449 per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; in init_cpu_capacity_callback()
476 * On ACPI-based systems skip registering cpufreq notifier as cpufreq in register_cpufreq_notifier()
480 return -EINVAL; in register_cpufreq_notifier()
483 return -ENOMEM; in register_cpufreq_notifier()
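Source lines 443-483 show the cpufreq side of capacity setup: a policy notifier removes each new policy's CPUs from a cpus_to_visit mask and records cpuinfo.max_freq as the per-CPU reference frequency; on ACPI-based systems the notifier is skipped entirely (line 480), and the mask allocation can fail with -ENOMEM (line 483). A condensed sketch of the callback, assuming the usual CPUFREQ_CREATE_POLICY event and a "normalize once every CPU has been visited" step that is not visible in the matches above:

static cpumask_var_t cpus_to_visit;	/* allocated when the notifier is registered */

static int init_cpu_capacity_sketch(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct cpufreq_policy *policy = data;
	int cpu;

	if (val != CPUFREQ_CREATE_POLICY)
		return 0;

	/* lines 446-449: mark these CPUs visited and note their max freq */
	cpumask_andnot(cpus_to_visit, cpus_to_visit, policy->related_cpus);
	for_each_cpu(cpu, policy->related_cpus)
		per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq;

	/* assumed follow-up: once every CPU has been seen, normalize */
	if (cpumask_empty(cpus_to_visit))
		topology_normalize_cpu_scale();

	return 0;
}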
513 * (2) -ENODEV when the device tree(DT) node is valid and found in the DT but
517 * (3) -1 if the node does not exist in the device tree
526 return -1; in get_cpu_for_node()
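The comment at source lines 513-517 spells out the three outcomes of get_cpu_for_node(): the logical CPU id for a valid node backed by a possible CPU, -ENODEV for a valid node without one, and -1 when the node is absent from the device tree. A compact sketch of how those map onto the usual OF helpers; the capacity parsing the real function performs on success is omitted:

static int get_cpu_for_node_sketch(struct device_node *node)
{
	struct device_node *cpu_node = of_parse_phandle(node, "cpu", 0);
	int cpu;

	if (!cpu_node)
		return -1;			/* (3): node not in the DT */

	cpu = of_cpu_node_to_id(cpu_node);	/* (1) logical id, or (2) -ENODEV */
	of_node_put(cpu_node);
	return cpu;
}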
561 } else if (cpu != -ENODEV) { in parse_core()
563 return -EINVAL; in parse_core()
573 return -EINVAL; in parse_core()
579 } else if (leaf && cpu != -ENODEV) { in parse_core()
581 return -EINVAL; in parse_core()
632 pr_err("%pOF: cpu-map children should be clusters\n", c); in parse_cluster()
633 return -EINVAL; in parse_cluster()
641 pr_err("%pOF: Non-leaf cluster with core %s\n", in parse_cluster()
643 return -EINVAL; in parse_cluster()
670 ret = parse_cluster(c, package_id, -1, 0); in parse_socket()
678 ret = parse_cluster(socket, 0, -1, 0); in parse_socket()
696 * When topology is provided cpu-map is essentially a root in parse_dt_topology()
700 of_get_child_by_name(cn, "cpu-map"); in parse_dt_topology()
717 return -EINVAL; in parse_dt_topology()
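Source lines 632-717 enforce the shape of the cpu-map node: children of cpu-map must be sockets or clusters rather than bare cores (line 632), and a cluster that contains sub-clusters must not also list cores directly (line 641). A shape-only sketch of that walk; signatures are simplified, depth and id bookkeeping is dropped, and the leaf handling is stubbed:

static int parse_core_sketch(struct device_node *core)
{
	/* the real parse_core() descends into "threadN" children and
	 * resolves the "cpu" phandle via get_cpu_for_node() */
	return 0;
}

static int parse_cluster_sketch(struct device_node *cluster)
{
	struct device_node *c;
	char name[20];
	bool leaf = true;
	int i, ret;

	/* sub-clusters first: finding any makes this a non-leaf cluster */
	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "cluster%d", i);
		c = of_get_child_by_name(cluster, name);
		if (!c)
			break;
		leaf = false;
		ret = parse_cluster_sketch(c);
		of_node_put(c);
		if (ret)
			return ret;
	}

	/* cores are only legal in leaf clusters (the error at line 641) */
	for (i = 0; ; i++) {
		snprintf(name, sizeof(name), "core%d", i);
		c = of_get_child_by_name(cluster, name);
		if (!c)
			break;
		ret = leaf ? parse_core_sketch(c) : -EINVAL;
		of_node_put(c);
		if (ret)
			return ret;
	}

	return 0;
}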
746 * For systems with no shared cpu-side LLC but with clusters defined, in cpu_coregroup_mask()
776 if (ret && ret != -ENOENT) in update_siblings_masks()
784 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
785 cpumask_set_cpu(cpuid, &cpu_topo->llc_sibling); in update_siblings_masks()
788 if (cpuid_topo->package_id != cpu_topo->package_id) in update_siblings_masks()
791 cpumask_set_cpu(cpuid, &cpu_topo->core_sibling); in update_siblings_masks()
792 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
794 if (cpuid_topo->cluster_id != cpu_topo->cluster_id) in update_siblings_masks()
797 if (cpuid_topo->cluster_id >= 0) { in update_siblings_masks()
798 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
799 cpumask_set_cpu(cpuid, &cpu_topo->cluster_sibling); in update_siblings_masks()
802 if (cpuid_topo->core_id != cpu_topo->core_id) in update_siblings_masks()
805 cpumask_set_cpu(cpuid, &cpu_topo->thread_sibling); in update_siblings_masks()
806 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
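Source lines 776-806 fill the sibling masks pairwise and symmetrically: CPUs in the same package become core siblings, those that additionally share a cluster id become cluster siblings, and those that also share a core id become thread siblings. The llc_sibling pair at lines 784-785 is guarded by a last-level-cache match that is not visible in these excerpts. A sketch of that chain for one pair of CPUs, using the struct cpu_topology fields shown above:

static void update_siblings_sketch(struct cpu_topology *a, int cpu_a,
				   struct cpu_topology *b, int cpu_b)
{
	if (a->package_id != b->package_id)
		return;

	cpumask_set_cpu(cpu_b, &a->core_sibling);
	cpumask_set_cpu(cpu_a, &b->core_sibling);

	if (a->cluster_id != b->cluster_id)
		return;

	if (a->cluster_id >= 0) {		/* valid cluster id, as at line 797 */
		cpumask_set_cpu(cpu_b, &a->cluster_sibling);
		cpumask_set_cpu(cpu_a, &b->cluster_sibling);
	}

	if (a->core_id != b->core_id)
		return;

	cpumask_set_cpu(cpu_b, &a->thread_sibling);
	cpumask_set_cpu(cpu_a, &b->thread_sibling);
}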
814 cpumask_clear(&cpu_topo->llc_sibling); in clear_cpu_topology()
815 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
817 cpumask_clear(&cpu_topo->cluster_sibling); in clear_cpu_topology()
818 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
820 cpumask_clear(&cpu_topo->core_sibling); in clear_cpu_topology()
821 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
822 cpumask_clear(&cpu_topo->thread_sibling); in clear_cpu_topology()
823 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
833 cpu_topo->thread_id = -1; in reset_cpu_topology()
834 cpu_topo->core_id = -1; in reset_cpu_topology()
835 cpu_topo->cluster_id = -1; in reset_cpu_topology()
836 cpu_topo->package_id = -1; in reset_cpu_topology()
877 * arch-specific early cache level detection a chance to run. in init_cpu_topology()
886 else if (ret != -ENOENT) in init_cpu_topology()
896 if (cpuid_topo->package_id != -1) in store_cpu_topology()
899 cpuid_topo->thread_id = -1; in store_cpu_topology()
900 cpuid_topo->core_id = cpuid; in store_cpu_topology()
901 cpuid_topo->package_id = cpu_to_node(cpuid); in store_cpu_topology()
904 cpuid, cpuid_topo->package_id, cpuid_topo->core_id, in store_cpu_topology()
905 cpuid_topo->thread_id); in store_cpu_topology()
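Source lines 896-905 are the store_cpu_topology() fallback: when neither DT nor ACPI populated the package id, the CPU is treated as a single-threaded core of its own, with its NUMA node standing in for the package. A minimal sketch of that default:

static void store_fallback_topology(struct cpu_topology *topo, unsigned int cpuid)
{
	if (topo->package_id != -1)
		return;				/* already populated from DT/ACPI */

	topo->thread_id  = -1;			/* no SMT information */
	topo->core_id    = cpuid;		/* one core per CPU */
	topo->package_id = cpu_to_node(cpuid);	/* NUMA node as the package */
}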