Lines Matching full:cluster
11 * Cluster cache enable trampoline code adapted from MCPM framework
87 static bool sunxi_core_is_cortex_a15(unsigned int core, unsigned int cluster) in sunxi_core_is_cortex_a15() argument
90 int cpu = cluster * SUNXI_CPUS_PER_CLUSTER + core; in sunxi_core_is_cortex_a15()
102 * would be mid way in a core or cluster power sequence. in sunxi_core_is_cortex_a15()
104 pr_err("%s: Couldn't get CPU cluster %u core %u device node\n", in sunxi_core_is_cortex_a15()
105 __func__, cluster, core); in sunxi_core_is_cortex_a15()
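The matches above come from a helper that maps a (cluster, core) pair to a logical CPU number and checks that CPU's device-tree node. A minimal sketch of that pattern, assuming SUNXI_CPUS_PER_CLUSTER is 4 and using the stock OF helpers (the fallback lookup and error logging of the real function are elided):

#include <linux/of.h>
#include <linux/of_device.h>

#define CPUS_PER_CLUSTER_SKETCH	4	/* assumed value of SUNXI_CPUS_PER_CLUSTER */

static bool core_is_cortex_a15_sketch(unsigned int core, unsigned int cluster)
{
	struct device_node *node;
	int cpu = cluster * CPUS_PER_CLUSTER_SKETCH + core;
	bool is_a15;

	node = of_cpu_device_node_get(cpu);
	if (!node)
		return false;	/* the driver logs an error here and still returns false */

	is_a15 = of_device_is_compatible(node, "arm,cortex-a15");
	of_node_put(node);

	return is_a15;
}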
115 static int sunxi_cpu_power_switch_set(unsigned int cpu, unsigned int cluster, in sunxi_cpu_power_switch_set() argument
121 reg = readl(prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
124 pr_debug("power clamp for cluster %u cpu %u already open\n", in sunxi_cpu_power_switch_set()
125 cluster, cpu); in sunxi_cpu_power_switch_set()
129 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
131 writel(0xfe, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
133 writel(0xf8, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
135 writel(0xf0, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
137 writel(0x00, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
140 writel(0xff, prcm_base + PRCM_PWR_SWITCH_REG(cluster, cpu)); in sunxi_cpu_power_switch_set()
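The writes around lines 129-137 form the power-switch (clamp) sequence: enabling opens the switch in stages from 0xff down to 0x00 with a short settle delay between steps, while disabling is a single write of 0xff. A minimal sketch of that sequence, assuming a 10 microsecond settle delay and an already-mapped register address:

#include <linux/delay.h>
#include <linux/io.h>

static void power_switch_set_sketch(void __iomem *sw_reg, bool enable)
{
	if (enable) {
		if (readl(sw_reg) == 0x00)
			return;		/* clamp is already fully open */

		/* open the switch in stages, letting the rail settle each time */
		writel(0xff, sw_reg);
		udelay(10);
		writel(0xfe, sw_reg);
		udelay(10);
		writel(0xf8, sw_reg);
		udelay(10);
		writel(0xf0, sw_reg);
		udelay(10);
		writel(0x00, sw_reg);
		udelay(10);
	} else {
		/* closing the switch is a single write */
		writel(0xff, sw_reg);
		udelay(10);
	}
}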
158 static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster) in sunxi_cpu_powerup() argument
162 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); in sunxi_cpu_powerup()
163 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS) in sunxi_cpu_powerup()
167 if (cluster == 0 && cpu == 0) in sunxi_cpu_powerup()
171 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
173 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
178 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
181 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
186 if (!sunxi_core_is_cortex_a15(cpu, cluster)) { in sunxi_cpu_powerup()
187 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cpu_powerup()
189 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cpu_powerup()
193 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
200 if (!sunxi_core_is_cortex_a15(cpu, cluster)) in sunxi_cpu_powerup()
203 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
206 sunxi_cpu_power_switch_set(cpu, cluster, true); in sunxi_cpu_powerup()
215 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerup()
217 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerup()
227 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
229 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
233 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
236 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cpu_powerup()
241 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
244 if (!sunxi_core_is_cortex_a15(cpu, cluster)) in sunxi_cpu_powerup()
248 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cpu_powerup()
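Nearly every step in the CPU power-up path above is the same read-modify-write access on a 32-bit memory-mapped register: read, set or clear a reset or gating bit, write back. A small sketch of that building block, with the register and mask left as parameters rather than the driver's PRCM/CPUCFG macros:

#include <linux/io.h>

static void reg_update_bit_sketch(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl(reg);

	if (set)
		val |= mask;	/* e.g. release an active-low reset line */
	else
		val &= ~mask;	/* e.g. assert an active-low reset line */

	writel(val, reg);
}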
253 static int sunxi_cluster_powerup(unsigned int cluster) in sunxi_cluster_powerup() argument
257 pr_debug("%s: cluster %u\n", __func__, cluster); in sunxi_cluster_powerup()
258 if (cluster >= SUNXI_NR_CLUSTERS) in sunxi_cluster_powerup()
261 /* For A83T, assert cluster cores resets */ in sunxi_cluster_powerup()
263 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
265 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
270 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
272 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
274 /* assert cluster processor power-on resets */ in sunxi_cluster_powerup()
275 reg = readl(prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
277 writel(reg, prcm_base + PRCM_CPU_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
279 /* assert cluster cores resets */ in sunxi_cluster_powerup()
282 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
285 R_CPUCFG_CLUSTER_PO_RST_CTRL(cluster)); in sunxi_cluster_powerup()
289 /* assert cluster resets */ in sunxi_cluster_powerup()
290 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
300 if (!sunxi_core_is_cortex_a15(0, cluster)) in sunxi_cluster_powerup()
303 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
306 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cluster_powerup()
307 if (sunxi_core_is_cortex_a15(0, cluster)) { in sunxi_cluster_powerup()
315 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG0(cluster)); in sunxi_cluster_powerup()
317 /* clear cluster power gate */ in sunxi_cluster_powerup()
318 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerup()
323 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerup()
326 /* de-assert cluster resets */ in sunxi_cluster_powerup()
327 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
331 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerup()
334 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
336 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_powerup()
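The cluster power-up matches sketch an ordering: hold the cluster resets asserted, raise ACINACTM to keep the ACE interface quiescent, configure the core-type specific control bits, drop the power gate, then release the resets and finally clear ACINACTM. A rough sketch of that ordering, with placeholder offsets and an assumed ACINACTM bit position (not the driver's macros):

#include <linux/bits.h>
#include <linux/io.h>

#define CX_RST_CTRL_SKETCH	0x80	/* placeholder offsets */
#define CX_CTRL_REG1_SKETCH	0x0c
#define ACINACTM_SKETCH		BIT(0)

static void cluster_powerup_sketch(void __iomem *cpucfg, u32 rst_mask)
{
	u32 reg;

	/* hold core/L2/debug resets for the whole cluster (active low) */
	reg = readl(cpucfg + CX_RST_CTRL_SKETCH);
	reg &= ~rst_mask;
	writel(reg, cpucfg + CX_RST_CTRL_SKETCH);

	/* keep the cluster's ACE interface quiescent while it powers up */
	reg = readl(cpucfg + CX_CTRL_REG1_SKETCH);
	reg |= ACINACTM_SKETCH;
	writel(reg, cpucfg + CX_CTRL_REG1_SKETCH);

	/* power switch, power gate and core-type setup elided here */

	/* release the cluster resets ... */
	reg = readl(cpucfg + CX_RST_CTRL_SKETCH);
	reg |= rst_mask;
	writel(reg, cpucfg + CX_RST_CTRL_SKETCH);

	/* ... and finally let the cluster talk to the interconnect again */
	reg = readl(cpucfg + CX_CTRL_REG1_SKETCH);
	reg &= ~ACINACTM_SKETCH;
	writel(reg, cpucfg + CX_CTRL_REG1_SKETCH);
}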
343 * enable CCI-400 and proper cluster cache disable before power down.
359 /* Flush all cache levels for this cluster. */ in sunxi_cluster_cache_disable_without_axi()
363 * Disable cluster-level coherency by masking in sunxi_cluster_cache_disable_without_axi()
374 static bool sunxi_mc_smp_cluster_is_down(unsigned int cluster) in sunxi_mc_smp_cluster_is_down() argument
379 if (sunxi_mc_smp_cpu_table[cluster][i]) in sunxi_mc_smp_cluster_is_down()
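The cluster-down test walks a per-[cluster][cpu] use-count table and reports the cluster as down only when every entry is zero. A standalone sketch, assuming the driver's 2-cluster by 4-core geometry:

#include <linux/types.h>

#define NR_CLUSTERS_SKETCH	2	/* assumed SUNXI_NR_CLUSTERS */
#define CPUS_PER_CLUSTER_SKETCH	4	/* assumed SUNXI_CPUS_PER_CLUSTER */

static unsigned int cpu_use_count[NR_CLUSTERS_SKETCH][CPUS_PER_CLUSTER_SKETCH];

static bool cluster_is_down_sketch(unsigned int cluster)
{
	unsigned int i;

	for (i = 0; i < CPUS_PER_CLUSTER_SKETCH; i++)
		if (cpu_use_count[cluster][i])
			return false;	/* at least one core still accounted up */

	return true;
}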
393 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_boot_secondary() local
397 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_boot_secondary()
401 if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) in sunxi_mc_smp_boot_secondary()
406 if (sunxi_mc_smp_cpu_table[cluster][cpu]) in sunxi_mc_smp_boot_secondary()
409 if (sunxi_mc_smp_cluster_is_down(cluster)) { in sunxi_mc_smp_boot_secondary()
411 sunxi_cluster_powerup(cluster); in sunxi_mc_smp_boot_secondary()
418 sunxi_cpu_powerup(cpu, cluster); in sunxi_mc_smp_boot_secondary()
421 sunxi_mc_smp_cpu_table[cluster][cpu]++; in sunxi_mc_smp_boot_secondary()
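boot_secondary, per the matches above, decodes the target CPU's cluster and core from its MPIDR, powers the cluster up first if no core in it is accounted as up, powers the core, and bumps its use count. A sketch of that bookkeeping (locking and the real power sequences are elided; it reuses the table, constants and helper from the previous sketch):

#include <linux/errno.h>
#include <asm/cputype.h>
#include <asm/smp_plat.h>

static int boot_secondary_sketch(unsigned int l_cpu)
{
	unsigned int mpidr = cpu_logical_map(l_cpu);
	unsigned int core = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (cluster >= NR_CLUSTERS_SKETCH || core >= CPUS_PER_CLUSTER_SKETCH)
		return -EINVAL;

	if (!cpu_use_count[cluster][core]) {
		if (cluster_is_down_sketch(cluster)) {
			/* first comer: the whole cluster power-up runs here */
		}
		/* then the per-core power-up sequence for (core, cluster) */
	}

	cpu_use_count[cluster][core]++;	/* the core is now accounted as up */
	return 0;
}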
430 unsigned int cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1); in sunxi_cluster_cache_disable() local
433 pr_debug("%s: cluster %u\n", __func__, cluster); in sunxi_cluster_cache_disable()
438 reg = readl(cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_cache_disable()
440 writel(reg, cpucfg_base + CPUCFG_CX_CTRL_REG1(cluster)); in sunxi_cluster_cache_disable()
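The comment at line 343 mentions proper cluster cache disable before power down, and the two matched functions above are the disable paths (with and without AXI access). A sketch of that MCPM-style teardown using the standard ARM helpers; the function name is illustrative and the exact placement of the ACINACTM step is an assumption:

#include <linux/arm-cci.h>
#include <linux/printk.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>

static void cluster_cache_disable_sketch(void)
{
	int ret;

	/* flush every cache level owned by this cluster */
	v7_exit_coherency_flush(all);

	/* mask incoming snoops and DVM messages on this cluster's CCI port */
	ret = cci_disable_port_by_cpu(read_cpuid_mpidr());
	if (ret)
		pr_warn("CCI port disable failed: %d\n", ret);

	/* asserting ACINACTM on CPUCFG_CX_CTRL_REG1 would follow here */
}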
445 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_cpu_die() local
450 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_cpu_die()
451 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); in sunxi_mc_smp_cpu_die()
454 sunxi_mc_smp_cpu_table[cluster][cpu]--; in sunxi_mc_smp_cpu_die()
455 if (sunxi_mc_smp_cpu_table[cluster][cpu] == 1) { in sunxi_mc_smp_cpu_die()
461 } else if (sunxi_mc_smp_cpu_table[cluster][cpu] > 1) { in sunxi_mc_smp_cpu_die()
462 pr_err("Cluster %d CPU%d boots multiple times\n", in sunxi_mc_smp_cpu_die()
463 cluster, cpu); in sunxi_mc_smp_cpu_die()
467 last_man = sunxi_mc_smp_cluster_is_down(cluster); in sunxi_mc_smp_cpu_die()
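cpu_die, as matched above, decrements the dying CPU's use count under the boot lock: a remaining count of one means a power-up request raced ahead (the power-down is aborted), a count above one is flagged as a multiple-boot error, and zero leads to the last-man check against the whole cluster. A sketch of just that accounting, on the table from the earlier sketches:

#include <linux/types.h>

static bool cpu_die_accounting_sketch(unsigned int cluster, unsigned int core)
{
	bool last_man = false;

	cpu_use_count[cluster][core]--;
	if (cpu_use_count[cluster][core] == 0)
		last_man = cluster_is_down_sketch(cluster);
	/* == 1: a concurrent power-up request already re-armed this core */
	/*  > 1: the core was booted more than once, treated as a bug     */

	return last_man;
}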
480 static int sunxi_cpu_powerdown(unsigned int cpu, unsigned int cluster) in sunxi_cpu_powerdown() argument
485 pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu); in sunxi_cpu_powerdown()
486 if (cpu >= SUNXI_CPUS_PER_CLUSTER || cluster >= SUNXI_NR_CLUSTERS) in sunxi_cpu_powerdown()
493 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerdown()
495 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cpu_powerdown()
499 sunxi_cpu_power_switch_set(cpu, cluster, false); in sunxi_cpu_powerdown()
504 static int sunxi_cluster_powerdown(unsigned int cluster) in sunxi_cluster_powerdown() argument
508 pr_debug("%s: cluster %u\n", __func__, cluster); in sunxi_cluster_powerdown()
509 if (cluster >= SUNXI_NR_CLUSTERS) in sunxi_cluster_powerdown()
512 /* assert cluster resets or system will hang */ in sunxi_cluster_powerdown()
513 pr_debug("%s: assert cluster reset\n", __func__); in sunxi_cluster_powerdown()
514 reg = readl(cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerdown()
518 writel(reg, cpucfg_base + CPUCFG_CX_RST_CTRL(cluster)); in sunxi_cluster_powerdown()
520 /* gate cluster power */ in sunxi_cluster_powerdown()
521 pr_debug("%s: gate cluster power\n", __func__); in sunxi_cluster_powerdown()
522 reg = readl(prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerdown()
527 writel(reg, prcm_base + PRCM_PWROFF_GATING_REG(cluster)); in sunxi_cluster_powerdown()
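The power-down helpers mirror the power-up path in reverse: cpu_powerdown sets the core's bit in the power-off gating register, waits briefly, then closes the power switch; cluster_powerdown additionally asserts the cluster resets first and gates the whole cluster. A sketch of the per-core half, with a placeholder bit position and already-mapped register addresses:

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>

static void cpu_powerdown_sketch(void __iomem *gating_reg,
				 void __iomem *sw_reg, unsigned int core)
{
	u32 reg;

	/* gate this core's power domain (bit position is a placeholder) */
	reg = readl(gating_reg);
	reg |= BIT(core);
	writel(reg, gating_reg);
	udelay(20);

	/* close the power switch completely */
	writel(0xff, sw_reg);
}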
535 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_cpu_kill() local
542 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_cpu_kill()
545 if (WARN_ON(cluster >= SUNXI_NR_CLUSTERS || in sunxi_mc_smp_cpu_kill()
564 if (sunxi_mc_smp_cpu_table[cluster][cpu]) in sunxi_mc_smp_cpu_kill()
567 reg = readl(cpucfg_base + CPUCFG_CX_STATUS(cluster)); in sunxi_mc_smp_cpu_kill()
578 sunxi_cpu_powerdown(cpu, cluster); in sunxi_mc_smp_cpu_kill()
580 if (!sunxi_mc_smp_cluster_is_down(cluster)) in sunxi_mc_smp_cpu_kill()
583 /* wait for cluster L2 WFI */ in sunxi_mc_smp_cpu_kill()
584 ret = readl_poll_timeout(cpucfg_base + CPUCFG_CX_STATUS(cluster), reg, in sunxi_mc_smp_cpu_kill()
589 * Ignore timeout on the cluster. Leaving the cluster on in sunxi_mc_smp_cpu_kill()
598 /* Power down cluster */ in sunxi_mc_smp_cpu_kill()
599 sunxi_cluster_powerdown(cluster); in sunxi_mc_smp_cpu_kill()
603 pr_debug("%s: cluster %u cpu %u powerdown: %d\n", in sunxi_mc_smp_cpu_kill()
604 __func__, cluster, cpu, ret); in sunxi_mc_smp_cpu_kill()
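cpu_kill waits for the dying core (and, for the last man, the cluster's L2) to reach WFI by polling the CPUCFG status register with readl_poll_timeout() before cutting power; a timeout on the cluster poll is tolerated and simply leaves the cluster powered. A sketch of one such poll, with assumed bit layout and timing constants:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/iopoll.h>

#define STANDBYWFI_SKETCH(n)	BIT(16 + (n))	/* placeholder bit layout */
#define POLL_USEC_SKETCH	100
#define TIMEOUT_USEC_SKETCH	100000

static int wait_for_core_wfi_sketch(void __iomem *status_reg, unsigned int core)
{
	u32 reg;

	/* spin until the core reports STANDBYWFI, or give up after the timeout */
	return readl_poll_timeout(status_reg, reg,
				  reg & STANDBYWFI_SKETCH(core),
				  POLL_USEC_SKETCH, TIMEOUT_USEC_SKETCH);
}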
630 unsigned int mpidr, cpu, cluster; in sunxi_mc_smp_cpu_table_init() local
634 cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1); in sunxi_mc_smp_cpu_table_init()
636 if (cluster >= SUNXI_NR_CLUSTERS || cpu >= SUNXI_CPUS_PER_CLUSTER) { in sunxi_mc_smp_cpu_table_init()
640 sunxi_mc_smp_cpu_table[cluster][cpu] = 1; in sunxi_mc_smp_cpu_table_init()
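cpu_table_init seeds the accounting table for the boot CPU: it reads the current MPIDR, takes affinity level 0 as the core and level 1 as the cluster, and marks that slot as up. A sketch, again bounds-checked against the assumed 2x4 geometry and table from the earlier sketches:

#include <asm/cputype.h>

static void cpu_table_init_sketch(void)
{
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int core = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (cluster < NR_CLUSTERS_SKETCH && core < CPUS_PER_CLUSTER_SKETCH)
		cpu_use_count[cluster][core] = 1;	/* boot CPU is already up */
}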
647 * We need the trampoline code to enable CCI-400 on the first cluster
868 /* Configure CCI-400 for boot cluster */ in sunxi_mc_smp_init()
871 pr_err("%s: failed to configure boot cluster: %d\n", in sunxi_mc_smp_init()
886 /* Actually enable multi cluster SMP */ in sunxi_mc_smp_init()
889 pr_info("sunxi multi cluster SMP support installed\n"); in sunxi_mc_smp_init()