arch/arm/common/mcpm_entry.c -- entry point for multi-cluster PM
Excerpts below are the lines matching "cluster"; elided code is marked "...".

/*
 * For a description of the main algorithm used here, see
 * Documentation/arch/arm/cluster-pm-race-avoidance.rst.
 */
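The CPU, cluster, and inbound state values used throughout come from
arch/arm/include/asm/mcpm.h. They are quoted here from memory as a reference
for the excerpts below; verify the exact values against the header:

	#define CPU_DOWN		0x11
	#define CPU_COMING_UP		0x12
	#define CPU_UP			0x13
	#define CPU_GOING_DOWN		0x14

	#define CLUSTER_DOWN		0x21
	#define CLUSTER_UP		0x22
	#define CLUSTER_GOING_DOWN	0x23

	#define INBOUND_NOT_COMING_UP	0x31
	#define INBOUND_COMING_UP	0x32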
static void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster)
{
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_GOING_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
}
/*
 * __mcpm_cpu_down: Indicates that CPU teardown is complete and that the
 *    cluster can be torn down without disrupting this CPU.
 */
static void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster)
{
	dmb();
	mcpm_sync.clusters[cluster].cpus[cpu].cpu = CPU_DOWN;
	sync_cache_w(&mcpm_sync.clusters[cluster].cpus[cpu].cpu);
	sev();
}
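Both helpers above write into mcpm_sync, an instance of the synchronization
structures from arch/arm/include/asm/mcpm.h. Reproduced here from memory: each
state byte is padded to the cache writeback granule so it can be cleaned or
invalidated without disturbing the neighbouring states:

	struct mcpm_sync_struct {
		/* individual CPU states */
		struct {
			s8 cpu __aligned(__CACHE_WRITEBACK_GRANULE);
		} cpus[MAX_CPUS_PER_CLUSTER];

		/* cluster state */
		s8 cluster __aligned(__CACHE_WRITEBACK_GRANULE);

		/* inbound-side state */
		s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
	};

	struct sync_struct {
		struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
	};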
/*
 * __mcpm_outbound_leave_critical: Leave the cluster teardown critical section.
 * @state: the final state of the cluster:
 *     CLUSTER_UP: no destructive teardown was done and the cluster has been
 *         restored to the previous state (CPU cache still active); or
 *     CLUSTER_DOWN: the cluster has been torn-down, ready for power-off
 *         (CPU cache disabled).
 */
static void __mcpm_outbound_leave_critical(unsigned int cluster, int state)
{
	dmb();
	mcpm_sync.clusters[cluster].cluster = state;
	sync_cache_w(&mcpm_sync.clusters[cluster].cluster);
	sev();
}
/*
 * __mcpm_outbound_enter_critical: Enter the cluster teardown critical section.
 * This function should be called by the last man, after local CPU teardown
 * is complete.  CPU cache expected to be active.
 *
 * Returns:
 *     false: the critical section was not entered because an inbound CPU was
 *         observed, or the cluster is already being set up;
 *     true: the critical section was entered: it is now safe to tear down the
 *         cluster.
 */
static bool __mcpm_outbound_enter_critical(unsigned int cpu, unsigned int cluster)
{
	unsigned int i;
	struct mcpm_sync_struct *c = &mcpm_sync.clusters[cluster];

	/* Warn inbound CPUs that the cluster is being torn down: */
	c->cluster = CLUSTER_GOING_DOWN;
	sync_cache_w(&c->cluster);

	/* Back out if the inbound cluster is already in the critical region: */
	sync_cache_r(&c->inbound);
	if (c->inbound == INBOUND_COMING_UP)
		goto abort;

	/*
	 * Wait for all CPUs to be in a DOWN state to make sure that local CPU
	 * teardown is complete on each CPU before tearing down the cluster.
	 *
	 * If any CPU has been woken up again from the DOWN state, then we
	 * shouldn't be taking the cluster down at all: abort in that case.
	 */
	...				/* polling loop; see the sketch below */
	return true;

abort:
	__mcpm_outbound_leave_critical(cluster, CLUSTER_UP);
	return false;
}
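The elided middle of __mcpm_outbound_enter_critical() polls every other CPU's
state. The following sketch is paraphrased from memory, not a verbatim copy of
the kernel source:

	/* For every other CPU in the cluster, wait while it is still on
	 * its way down; if it settles anywhere other than CPU_DOWN, the
	 * cluster must not be torn down.
	 */
	sync_cache_r(&c->cpus);
	for (i = 0; i < MAX_CPUS_PER_CLUSTER; i++) {
		int cpustate;

		if (i == cpu)
			continue;

		while (1) {
			cpustate = c->cpus[i].cpu;
			if (cpustate != CPU_GOING_DOWN)
				break;
			wfe();			/* woken by sev() elsewhere */
			sync_cache_r(&c->cpus[i].cpu);
		}

		/* Anything other than a completed power-down aborts: */
		if (cpustate != CPU_DOWN)
			goto abort;
	}

	return true;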
static int __mcpm_cluster_state(unsigned int cluster)
{
	sync_cache_r(&mcpm_sync.clusters[cluster].cluster);
	return mcpm_sync.clusters[cluster].cluster;
}
void mcpm_set_entry_vector(unsigned cpu, unsigned cluster, void *ptr)
{
	unsigned long val = ptr ? __pa_symbol(ptr) : 0;
	mcpm_entry_vectors[cluster][cpu] = val;
	sync_cache_w(&mcpm_entry_vectors[cluster][cpu]);
}
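A typical consumer of mcpm_set_entry_vector() is the SMP boot path. The sketch
below is modelled on mcpm_boot_secondary() in arch/arm/common/platsmp.c; the
my_ prefix marks it as illustrative rather than a copy (the real function also
sends a wakeup IPI after powering the CPU up):

	static int my_boot_secondary(unsigned int l_cpu, struct task_struct *idle)
	{
		unsigned int mpidr = cpu_logical_map(l_cpu);
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

		/* Make the woken CPU enter the kernel via secondary_startup: */
		mcpm_set_entry_vector(cpu, cluster, secondary_startup);
		return mcpm_cpu_power_up(cpu, cluster);
	}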
void mcpm_set_early_poke(unsigned cpu, unsigned cluster,
			 unsigned long poke_phys_addr, unsigned long poke_val)
{
	unsigned long *poke = &mcpm_entry_early_pokes[cluster][cpu][0];
	/* ... store the poke_phys_addr/poke_val pair and sync it to RAM ... */
}
static inline bool mcpm_cluster_unused(unsigned int cluster)
{
	int i, cnt;
	for (i = 0, cnt = 0; i < MAX_CPUS_PER_CLUSTER; i++)
		cnt |= mcpm_cpu_use_count[cluster][i];
	return !cnt;
}
int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster)
{
	bool cpu_is_down, cluster_is_down;
	int ret = 0;

	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	...
	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
	cluster_is_down = mcpm_cluster_unused(cluster);

	mcpm_cpu_use_count[cluster][cpu]++;
	/*
	 * A count of 1 means the CPU is (or will be) up; 2 means this
	 * power-up raced an in-progress power-down.  Anything else is a bug.
	 */
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 1 &&
	       mcpm_cpu_use_count[cluster][cpu] != 2);

	if (cluster_is_down)
		ret = platform_ops->cluster_powerup(cluster);
	if (cpu_is_down && !ret)
		ret = platform_ops->cpu_powerup(cpu, cluster);
	...
	return ret;
}
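The platform_ops calls above resolve to a backend registered through
mcpm_platform_register(). A hypothetical registration, assuming the callback
names declared for struct mcpm_platform_ops in asm/mcpm.h; the my_* handlers
are made-up names standing in for a platform's own implementations:

	static const struct mcpm_platform_ops my_pm_ops = {
		.cpu_powerup		= my_cpu_powerup,
		.cluster_powerup	= my_cluster_powerup,
		.cpu_powerdown_prepare	= my_cpu_powerdown_prepare,
		.cluster_powerdown_prepare = my_cluster_powerdown_prepare,
		.cpu_cache_disable	= my_cpu_cache_disable,
		.cluster_cache_disable	= my_cluster_cache_disable,
		.wait_for_powerdown	= my_wait_for_powerdown,
		.cpu_is_up		= my_cpu_is_up,
		.cluster_is_up		= my_cluster_is_up,
	};

	static int __init my_pm_init(void)
	{
		return mcpm_platform_register(&my_pm_ops);
	}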
void mcpm_cpu_power_down(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_going_down, last_man;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	...
	__mcpm_cpu_going_down(cpu, cluster);
	arch_spin_lock(&mcpm_lock);
	BUG_ON(__mcpm_cluster_state(cluster) != CLUSTER_UP);

	mcpm_cpu_use_count[cluster][cpu]--;
	BUG_ON(mcpm_cpu_use_count[cluster][cpu] != 0 &&
	       mcpm_cpu_use_count[cluster][cpu] != 1);
	cpu_going_down = !mcpm_cpu_use_count[cluster][cpu];
	last_man = mcpm_cluster_unused(cluster);

	if (last_man && __mcpm_outbound_enter_critical(cpu, cluster)) {
		platform_ops->cpu_powerdown_prepare(cpu, cluster);
		platform_ops->cluster_powerdown_prepare(cluster);
		...
		__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	} else {
		if (cpu_going_down)
			platform_ops->cpu_powerdown_prepare(cpu, cluster);
		...
	}

	__mcpm_cpu_down(cpu, cluster);
	...
}
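mcpm_cpu_power_down() may return if the power-down is denied or raced with a
new power-up request, so callers must cope with coming back. A hotplug-style
caller, sketched after mcpm_cpu_die() in arch/arm/common/platsmp.c
(my_cpu_die is an illustrative name):

	static void my_cpu_die(unsigned int l_cpu)
	{
		unsigned int mpidr = cpu_logical_map(l_cpu);
		unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
		unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

		mcpm_set_entry_vector(cpu, cluster, NULL);
		mcpm_cpu_power_down();	/* may return if power-down is denied */
		while (1)
			wfi();		/* park here until physically reset */
	}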
int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
{
	int ret;
	...
	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);

	return ret;
}
void mcpm_cpu_suspend(void)
{
	...
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	...
	platform_ops->cpu_suspend_prepare(cpu, cluster);
	...
	mcpm_cpu_power_down();
}
int mcpm_cpu_powered_up(void)
{
	unsigned int mpidr, cpu, cluster;
	bool cpu_was_down, first_man;
	...
	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	...
	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
	first_man = mcpm_cluster_unused(cluster);

	if (first_man && platform_ops->cluster_is_up)
		platform_ops->cluster_is_up(cluster);
	if (cpu_was_down)
		mcpm_cpu_use_count[cluster][cpu] = 1;
	if (platform_ops->cpu_is_up)
		platform_ops->cpu_is_up(cpu, cluster);
	...
}
static int __init nocache_trampoline(unsigned long _arg)
{
	void (*cache_disable)(void) = (void *)_arg;
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	mcpm_set_entry_vector(cpu, cluster, cpu_resume_no_hyp);
	...
	__mcpm_cpu_going_down(cpu, cluster);
	BUG_ON(!__mcpm_outbound_enter_critical(cpu, cluster));
	cache_disable();
	__mcpm_outbound_leave_critical(cluster, CLUSTER_DOWN);
	__mcpm_cpu_down(cpu, cluster);
	...
}
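nocache_trampoline() backs mcpm_loopback() (not matched by this search), which
suspends the calling CPU and takes it through a full MCPM down/up cycle so the
inbound setup path runs once on the boot cluster. A backend needing this would
call something like the following, where my_cluster_cache_disable is a
placeholder for the platform's own cache-disable hook:

	ret = mcpm_loopback(my_cluster_cache_disable);
	if (ret)
		pr_warn("MCPM loopback failed (%d)\n", ret);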
int __init mcpm_sync_init(
	void (*power_up_setup)(unsigned int affinity_level))
{
	...
	/*
	 * Set initial CPU and cluster states.
	 * Only one cluster is assumed to be active at this point.
	 */
	for (i = 0; i < MAX_NR_CLUSTERS; i++) {
		mcpm_sync.clusters[i].cluster = CLUSTER_DOWN;
		...
	}
	...
	mcpm_sync.clusters[this_cluster].cluster = CLUSTER_UP;
	...
}