Lines matching refs: cpu, all in the architecture topology code (drivers/base/arch_topology.c)

66 int cpu; in topology_set_scale_freq_source() local
77 for_each_cpu(cpu, cpus) { in topology_set_scale_freq_source()
78 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_set_scale_freq_source()
82 rcu_assign_pointer(per_cpu(sft_data, cpu), data); in topology_set_scale_freq_source()
83 cpumask_set_cpu(cpu, &scale_freq_counters_mask); in topology_set_scale_freq_source()
97 int cpu; in topology_clear_scale_freq_source() local
101 for_each_cpu(cpu, cpus) { in topology_clear_scale_freq_source()
102 sfd = rcu_dereference(*per_cpu_ptr(&sft_data, cpu)); in topology_clear_scale_freq_source()
105 rcu_assign_pointer(per_cpu(sft_data, cpu), NULL); in topology_clear_scale_freq_source()
106 cpumask_clear_cpu(cpu, &scale_freq_counters_mask); in topology_clear_scale_freq_source()
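The two groups above are the register/unregister pair for a per-CPU frequency-scale provider. A minimal sketch of how an architecture driver might call them, assuming the upstream struct scale_freq_data layout (a source id plus a set_freq_scale() callback); everything named example_* is hypothetical and the callback body is left empty.

    #include <linux/arch_topology.h>
    #include <linux/cpumask.h>

    /* Hypothetical callback: would read this CPU's activity counters and
     * update its frequency scale factor (driver specific). */
    static void example_set_freq_scale(void)
    {
    }

    static struct scale_freq_data example_sfd = {
        .source         = SCALE_FREQ_SOURCE_ARCH,
        .set_freq_scale = example_set_freq_scale,
    };

    static void example_register_counters(const struct cpumask *cpus)
    {
        /* Points the per-CPU sft_data at example_sfd for every CPU in @cpus */
        topology_set_scale_freq_source(&example_sfd, cpus);
    }

    static void example_unregister_counters(const struct cpumask *cpus)
    {
        /* Only clears CPUs whose current source is SCALE_FREQ_SOURCE_ARCH */
        topology_clear_scale_freq_source(SCALE_FREQ_SOURCE_ARCH, cpus);
    }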
159 void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity) in topology_set_cpu_scale() argument
161 per_cpu(cpu_scale, cpu) = capacity; in topology_set_cpu_scale()
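topology_set_cpu_scale() simply stores the value in the per-CPU cpu_scale variable that topology_get_cpu_scale()/arch_scale_cpu_capacity() read back. A tiny illustrative sketch; the halving is arbitrary and SCHED_CAPACITY_SCALE (1024) is the reference capacity:

    #include <linux/arch_topology.h>
    #include <linux/printk.h>
    #include <linux/sched/topology.h>

    /* Illustrative only: pretend CPU 0 has half the capacity of the largest CPU. */
    static void example_halve_cpu0_capacity(void)
    {
        topology_set_cpu_scale(0, SCHED_CAPACITY_SCALE / 2);
        pr_info("cpu0 capacity is now %lu\n", topology_get_cpu_scale(0));
    }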
185 int cpu; in topology_update_hw_pressure() local
187 cpu = cpumask_first(cpus); in topology_update_hw_pressure()
188 max_capacity = arch_scale_cpu_capacity(cpu); in topology_update_hw_pressure()
189 max_freq = arch_scale_freq_ref(cpu); in topology_update_hw_pressure()
202 trace_hw_pressure_update(cpu, pressure); in topology_update_hw_pressure()
204 for_each_cpu(cpu, cpus) in topology_update_hw_pressure()
205 WRITE_ONCE(per_cpu(hw_pressure, cpu), pressure); in topology_update_hw_pressure()
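topology_update_hw_pressure() turns a capped frequency into a capacity pressure (max_capacity minus the capped capacity) and publishes it for every CPU in the mask. A hedged sketch of a caller, e.g. a hypothetical cooling path that has just clamped a policy; the helper name is made up:

    #include <linux/arch_topology.h>
    #include <linux/cpufreq.h>

    /*
     * Hypothetical helper: report that @policy's CPUs are now limited to
     * @capped_khz.  Passing the policy's full max frequency again drops
     * the pressure back to zero.
     */
    static void example_report_freq_cap(struct cpufreq_policy *policy,
                                        unsigned long capped_khz)
    {
        topology_update_hw_pressure(policy->related_cpus, capped_khz);
    }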
213 struct cpu *cpu = container_of(dev, struct cpu, dev); in cpu_capacity_show() local
215 return sysfs_emit(buf, "%lu\n", topology_get_cpu_scale(cpu->dev.id)); in cpu_capacity_show()
223 static int cpu_capacity_sysctl_add(unsigned int cpu) in cpu_capacity_sysctl_add() argument
225 struct device *cpu_dev = get_cpu_device(cpu); in cpu_capacity_sysctl_add()
235 static int cpu_capacity_sysctl_remove(unsigned int cpu) in cpu_capacity_sysctl_remove() argument
237 struct device *cpu_dev = get_cpu_device(cpu); in cpu_capacity_sysctl_remove()
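cpu_capacity_sysctl_add()/_remove() are CPU-hotplug callbacks that attach and detach the cpu_capacity attribute on each CPU's device. A sketch of the same pattern for a made-up attribute; the example_* names and the "example/attr" state string are hypothetical:

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/device.h>
    #include <linux/sysfs.h>

    static ssize_t example_attr_show(struct device *dev,
                                     struct device_attribute *attr, char *buf)
    {
        return sysfs_emit(buf, "%d\n", dev->id);
    }
    static DEVICE_ATTR_RO(example_attr);

    static int example_cpu_add(unsigned int cpu)
    {
        struct device *cpu_dev = get_cpu_device(cpu);

        if (!cpu_dev)
            return -ENOENT;

        return device_create_file(cpu_dev, &dev_attr_example_attr);
    }

    static int example_cpu_remove(unsigned int cpu)
    {
        struct device *cpu_dev = get_cpu_device(cpu);

        if (cpu_dev)
            device_remove_file(cpu_dev, &dev_attr_example_attr);

        return 0;
    }

    static int __init example_init(void)
    {
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example/attr",
                                example_cpu_add, example_cpu_remove);
        return ret < 0 ? ret : 0;
    }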
289 int cpu; in topology_normalize_cpu_scale() local
295 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
296 capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); in topology_normalize_cpu_scale()
301 for_each_possible_cpu(cpu) { in topology_normalize_cpu_scale()
302 capacity = raw_capacity[cpu] * per_cpu(capacity_freq_ref, cpu); in topology_normalize_cpu_scale()
305 topology_set_cpu_scale(cpu, capacity); in topology_normalize_cpu_scale()
307 cpu, topology_get_cpu_scale(cpu)); in topology_normalize_cpu_scale()
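topology_normalize_cpu_scale() makes two passes: one to find the largest raw_capacity * capacity_freq_ref product, and one to scale every CPU so the biggest lands exactly on SCHED_CAPACITY_SCALE (1 << 10). The same arithmetic as a standalone, compilable C sketch with made-up per-CPU numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define SCHED_CAPACITY_SHIFT 10 /* 1024 == full capacity */

    /* Mirror the two loops in the listing above. */
    static void normalize_cpu_scale(const uint64_t *raw, const uint64_t *freq_ref,
                                    uint64_t *scale, int nr_cpus)
    {
        uint64_t capacity, capacity_scale = 1;
        int cpu;

        for (cpu = 0; cpu < nr_cpus; cpu++) {
            capacity = raw[cpu] * freq_ref[cpu];
            if (capacity > capacity_scale)
                capacity_scale = capacity;
        }

        for (cpu = 0; cpu < nr_cpus; cpu++) {
            capacity = raw[cpu] * freq_ref[cpu];
            scale[cpu] = (capacity << SCHED_CAPACITY_SHIFT) / capacity_scale;
        }
    }

    int main(void)
    {
        /* Hypothetical 2+2 system: little CPUs rated 512 DMIPS/MHz at 1.8 GHz,
         * big CPUs 1024 DMIPS/MHz at 2.4 GHz (frequencies in kHz). */
        uint64_t raw[]  = { 512, 512, 1024, 1024 };
        uint64_t freq[] = { 1800000, 1800000, 2400000, 2400000 };
        uint64_t scale[4];
        int cpu;

        normalize_cpu_scale(raw, freq, scale, 4);
        for (cpu = 0; cpu < 4; cpu++)
            printf("cpu%d capacity %llu\n", cpu, (unsigned long long)scale[cpu]);
        return 0;
    }

With those numbers the little CPUs come out at 384 and the big CPUs at 1024.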
311 bool __init topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu) in topology_parse_cpu_capacity() argument
333 raw_capacity[cpu] = cpu_capacity; in topology_parse_cpu_capacity()
335 cpu_node, raw_capacity[cpu]); in topology_parse_cpu_capacity()
345 per_cpu(capacity_freq_ref, cpu) = in topology_parse_cpu_capacity()
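topology_parse_cpu_capacity() reads the optional capacity-dmips-mhz property from each CPU's DT node and caches a per-CPU reference frequency. A hedged sketch of the core device-tree calls; the function name is made up and the clock lookup just follows the usual of_clk_get()/clk_get_rate() pattern:

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/of.h>
    #include <linux/types.h>

    /* Illustrative: fetch one CPU node's DMIPS/MHz rating and clock rate (kHz). */
    static int example_read_cpu_capacity(struct device_node *cpu_node,
                                         u32 *dmips_mhz, u32 *ref_khz)
    {
        struct clk *cpu_clk;
        int ret;

        /* Property is optional; absence disables asymmetric capacities. */
        ret = of_property_read_u32(cpu_node, "capacity-dmips-mhz", dmips_mhz);
        if (ret)
            return ret;

        cpu_clk = of_clk_get(cpu_node, 0);
        if (!IS_ERR(cpu_clk)) {
            *ref_khz = clk_get_rate(cpu_clk) / 1000;
            clk_put(cpu_clk);
        }
        return 0;
    }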
362 void __weak freq_inv_set_max_ratio(int cpu, u64 max_rate) in freq_inv_set_max_ratio() argument
373 int cpu; in topology_init_cpu_capacity_cppc() local
383 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
384 if (!cppc_get_perf_caps(cpu, &perf_caps) && in topology_init_cpu_capacity_cppc()
387 raw_capacity[cpu] = perf_caps.highest_perf; in topology_init_cpu_capacity_cppc()
388 capacity_scale = max_t(u64, capacity_scale, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
390 per_cpu(capacity_freq_ref, cpu) = cppc_perf_to_khz(&perf_caps, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
393 cpu, raw_capacity[cpu]); in topology_init_cpu_capacity_cppc()
397 pr_err("cpu_capacity: CPU%d missing/invalid highest performance.\n", cpu); in topology_init_cpu_capacity_cppc()
402 for_each_possible_cpu(cpu) { in topology_init_cpu_capacity_cppc()
403 freq_inv_set_max_ratio(cpu, in topology_init_cpu_capacity_cppc()
404 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in topology_init_cpu_capacity_cppc()
406 capacity = raw_capacity[cpu]; in topology_init_cpu_capacity_cppc()
409 topology_set_cpu_scale(cpu, capacity); in topology_init_cpu_capacity_cppc()
411 cpu, topology_get_cpu_scale(cpu)); in topology_init_cpu_capacity_cppc()
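topology_init_cpu_capacity_cppc() derives raw capacities from the ACPI CPPC highest_perf value and converts it to a kHz reference via cppc_perf_to_khz(). A sketch of reading those caps for one CPU, assuming the <acpi/cppc_acpi.h> helpers that appear in the listing; the wrapper name is hypothetical:

    #include <acpi/cppc_acpi.h>
    #include <linux/types.h>

    /* Illustrative: return a CPU's highest perf level and its kHz equivalent. */
    static int example_cppc_capacity(int cpu, u64 *highest_perf, u64 *ref_khz)
    {
        struct cppc_perf_caps perf_caps;
        int ret;

        ret = cppc_get_perf_caps(cpu, &perf_caps);
        if (ret)
            return ret;

        *highest_perf = perf_caps.highest_perf;
        *ref_khz = cppc_perf_to_khz(&perf_caps, perf_caps.highest_perf);
        return 0;
    }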
437 int cpu; in init_cpu_capacity_callback() local
448 for_each_cpu(cpu, policy->related_cpus) { in init_cpu_capacity_callback()
449 per_cpu(capacity_freq_ref, cpu) = policy->cpuinfo.max_freq; in init_cpu_capacity_callback()
450 freq_inv_set_max_ratio(cpu, in init_cpu_capacity_callback()
451 per_cpu(capacity_freq_ref, cpu) * HZ_PER_KHZ); in init_cpu_capacity_callback()
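init_cpu_capacity_callback() runs from a cpufreq policy notifier and records policy->cpuinfo.max_freq as each related CPU's reference frequency. A sketch of hooking the same notification point; the example_* names and the printout are illustrative:

    #include <linux/cpufreq.h>
    #include <linux/cpumask.h>
    #include <linux/notifier.h>
    #include <linux/printk.h>

    static int example_policy_cb(struct notifier_block *nb,
                                 unsigned long val, void *data)
    {
        struct cpufreq_policy *policy = data;

        if (val != CPUFREQ_CREATE_POLICY)
            return NOTIFY_DONE;

        pr_info("cpus %*pbl, max_freq %u kHz\n",
                cpumask_pr_args(policy->related_cpus),
                policy->cpuinfo.max_freq);
        return NOTIFY_OK;
    }

    static struct notifier_block example_policy_nb = {
        .notifier_call = example_policy_cb,
    };

    static int __init example_register(void)
    {
        return cpufreq_register_notifier(&example_policy_nb,
                                         CPUFREQ_POLICY_NOTIFIER);
    }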
521 int cpu; in get_cpu_for_node() local
528 cpu = of_cpu_node_to_id(cpu_node); in get_cpu_for_node()
529 if (cpu >= 0) in get_cpu_for_node()
530 topology_parse_cpu_capacity(cpu_node, cpu); in get_cpu_for_node()
535 return cpu; in get_cpu_for_node()
544 int cpu; in parse_core() local
555 cpu = get_cpu_for_node(t); in parse_core()
556 if (cpu >= 0) { in parse_core()
557 cpu_topology[cpu].package_id = package_id; in parse_core()
558 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
559 cpu_topology[cpu].core_id = core_id; in parse_core()
560 cpu_topology[cpu].thread_id = i; in parse_core()
561 } else if (cpu != -ENODEV) { in parse_core()
568 cpu = get_cpu_for_node(core); in parse_core()
569 if (cpu >= 0) { in parse_core()
576 cpu_topology[cpu].package_id = package_id; in parse_core()
577 cpu_topology[cpu].cluster_id = cluster_id; in parse_core()
578 cpu_topology[cpu].core_id = core_id; in parse_core()
579 } else if (leaf && cpu != -ENODEV) { in parse_core()
686 int cpu; in parse_dt_topology() local
715 for_each_possible_cpu(cpu) in parse_dt_topology()
716 if (cpu_topology[cpu].package_id < 0) { in parse_dt_topology()
730 const struct cpumask *cpu_coregroup_mask(int cpu) in cpu_coregroup_mask() argument
732 const cpumask_t *core_mask = cpumask_of_node(cpu_to_node(cpu)); in cpu_coregroup_mask()
735 if (cpumask_subset(&cpu_topology[cpu].core_sibling, core_mask)) { in cpu_coregroup_mask()
737 core_mask = &cpu_topology[cpu].core_sibling; in cpu_coregroup_mask()
740 if (last_level_cache_is_valid(cpu)) { in cpu_coregroup_mask()
741 if (cpumask_subset(&cpu_topology[cpu].llc_sibling, core_mask)) in cpu_coregroup_mask()
742 core_mask = &cpu_topology[cpu].llc_sibling; in cpu_coregroup_mask()
751 cpumask_subset(core_mask, &cpu_topology[cpu].cluster_sibling)) in cpu_coregroup_mask()
752 core_mask = &cpu_topology[cpu].cluster_sibling; in cpu_coregroup_mask()
757 const struct cpumask *cpu_clustergroup_mask(int cpu) in cpu_clustergroup_mask() argument
763 if (cpumask_subset(cpu_coregroup_mask(cpu), in cpu_clustergroup_mask()
764 &cpu_topology[cpu].cluster_sibling)) in cpu_clustergroup_mask()
765 return topology_sibling_cpumask(cpu); in cpu_clustergroup_mask()
767 return &cpu_topology[cpu].cluster_sibling; in cpu_clustergroup_mask()
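cpu_coregroup_mask() and cpu_clustergroup_mask() exist mainly to feed the scheduler's topology table. A sketch of how an architecture might wire them in, modeled on the scheduler's default topology levels; treat the table contents as an assumption about a typical CONFIG_SCHED_MC/CONFIG_SCHED_CLUSTER build rather than any specific arch's code:

    #include <linux/arch_topology.h>
    #include <linux/sched/topology.h>

    /* Illustrative levels: CLS below MC below the package-wide level. */
    static struct sched_domain_topology_level example_topology[] = {
    #ifdef CONFIG_SCHED_CLUSTER
        { cpu_clustergroup_mask, cpu_cluster_flags, SD_INIT_NAME(CLS) },
    #endif
    #ifdef CONFIG_SCHED_MC
        { cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
    #endif
        { cpu_cpu_mask, NULL, SD_INIT_NAME(PKG) },
        { NULL, },
    };

    static void __init example_set_topology(void)
    {
        set_sched_topology(example_topology);
    }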
773 int cpu, ret; in update_siblings_masks() local
780 for_each_online_cpu(cpu) { in update_siblings_masks()
781 cpu_topo = &cpu_topology[cpu]; in update_siblings_masks()
783 if (last_level_cache_is_shared(cpu, cpuid)) { in update_siblings_masks()
784 cpumask_set_cpu(cpu, &cpuid_topo->llc_sibling); in update_siblings_masks()
792 cpumask_set_cpu(cpu, &cpuid_topo->core_sibling); in update_siblings_masks()
798 cpumask_set_cpu(cpu, &cpuid_topo->cluster_sibling); in update_siblings_masks()
806 cpumask_set_cpu(cpu, &cpuid_topo->thread_sibling); in update_siblings_masks()
810 static void clear_cpu_topology(int cpu) in clear_cpu_topology() argument
812 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in clear_cpu_topology()
815 cpumask_set_cpu(cpu, &cpu_topo->llc_sibling); in clear_cpu_topology()
818 cpumask_set_cpu(cpu, &cpu_topo->cluster_sibling); in clear_cpu_topology()
821 cpumask_set_cpu(cpu, &cpu_topo->core_sibling); in clear_cpu_topology()
823 cpumask_set_cpu(cpu, &cpu_topo->thread_sibling); in clear_cpu_topology()
828 unsigned int cpu; in reset_cpu_topology() local
830 for_each_possible_cpu(cpu) { in reset_cpu_topology()
831 struct cpu_topology *cpu_topo = &cpu_topology[cpu]; in reset_cpu_topology()
838 clear_cpu_topology(cpu); in reset_cpu_topology()
842 void remove_cpu_topology(unsigned int cpu) in remove_cpu_topology() argument
846 for_each_cpu(sibling, topology_core_cpumask(cpu)) in remove_cpu_topology()
847 cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); in remove_cpu_topology()
848 for_each_cpu(sibling, topology_sibling_cpumask(cpu)) in remove_cpu_topology()
849 cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); in remove_cpu_topology()
850 for_each_cpu(sibling, topology_cluster_cpumask(cpu)) in remove_cpu_topology()
851 cpumask_clear_cpu(cpu, topology_cluster_cpumask(sibling)); in remove_cpu_topology()
852 for_each_cpu(sibling, topology_llc_cpumask(cpu)) in remove_cpu_topology()
853 cpumask_clear_cpu(cpu, topology_llc_cpumask(sibling)); in remove_cpu_topology()
855 clear_cpu_topology(cpu); in remove_cpu_topology()
866 int cpu, ret; in init_cpu_topology() local
882 for_each_possible_cpu(cpu) { in init_cpu_topology()
883 ret = fetch_cache_info(cpu); in init_cpu_topology()
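init_cpu_topology(), update_siblings_masks(), and remove_cpu_topology() are intended to be driven from architecture SMP code. A sketch of where each call slots in; the example_* hook names are generic stand-ins, not real arch functions:

    #include <linux/arch_topology.h>
    #include <linux/init.h>

    /* Boot: once, before secondary CPUs are brought up.  Parses the DT
     * cpu-map / ACPI topology and prefetches cache info (fetch_cache_info). */
    static void __init example_smp_prepare(void)
    {
        init_cpu_topology();
    }

    /* Secondary bring-up: after the arch has filled in cpu_topology[cpu],
     * fold the new CPU into its siblings' core/cluster/thread/llc masks. */
    static void example_cpu_up(unsigned int cpu)
    {
        update_siblings_masks(cpu);
    }

    /* Hot-unplug: drop the CPU from every sibling mask and reset its entry. */
    static void example_cpu_down(unsigned int cpu)
    {
        remove_cpu_topology(cpu);
    }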