Lines Matching "timer-cannot-wake-cpu" (query tokens: timer, cannot, wake, cpu)

1 // SPDX-License-Identifier: GPL-2.0-only
11 #include <linux/cpu.h>
31 static int tos_resident_cpu = -1;
42 * "enable-method" property of each CPU in the DT, but given that there is no
43 * arch-specific way to check this, we assume that the DT is sensible.
47 int migrate_type = -1; in psci_ops_check()
48 int cpu; in psci_ops_check() local
52 return -EOPNOTSUPP; in psci_ops_check()
61 for_each_online_cpu(cpu) in psci_ops_check()
62 if (psci_tos_resident_on(cpu)) { in psci_ops_check()
63 tos_resident_cpu = cpu; in psci_ops_check()
66 if (tos_resident_cpu == -1) in psci_ops_check()
67 pr_warn("UP Trusted OS resides on no online CPU\n"); in psci_ops_check()
80 int cpu; in down_and_up_cpus() local
86 for_each_cpu(cpu, cpus) { in down_and_up_cpus()
87 int ret = remove_cpu(cpu); in down_and_up_cpus()
91 * resident CPU. in down_and_up_cpus()
94 if (ret != -EBUSY) { in down_and_up_cpus()
96 "to power down last online CPU %d\n", in down_and_up_cpus()
97 ret, cpu); in down_and_up_cpus()
100 } else if (cpu == tos_resident_cpu) { in down_and_up_cpus()
101 if (ret != -EPERM) { in down_and_up_cpus()
103 "to power down TOS resident CPU %d\n", in down_and_up_cpus()
104 ret, cpu); in down_and_up_cpus()
109 "to power down CPU %d\n", ret, cpu); in down_and_up_cpus()
114 cpumask_set_cpu(cpu, offlined_cpus); in down_and_up_cpus()
118 for_each_cpu(cpu, offlined_cpus) { in down_and_up_cpus()
119 int ret = add_cpu(cpu); in down_and_up_cpus()
123 "to power up CPU %d\n", ret, cpu); in down_and_up_cpus()
126 cpumask_clear_cpu(cpu, offlined_cpus); in down_and_up_cpus()
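The down_and_up_cpus() matches show the core hotplug stress loop: try to offline every CPU in the set, treating -EBUSY (last online CPU) and -EPERM (the Trusted-OS resident CPU) as expected refusals, then bring the offlined CPUs back with add_cpu(). A condensed sketch of that loop, assuming remove_cpu()/add_cpu() from <linux/cpu.h>; the error bookkeeping is simplified compared with what the full function likely does:

        #include <linux/cpu.h>
        #include <linux/cpumask.h>
        #include <linux/printk.h>

        /* Sketch: power the CPUs in @cpus down and back up, recording which
         * ones actually went offline in @offlined_cpus. */
        static int down_and_up_cpus_sketch(const struct cpumask *cpus,
                                           struct cpumask *offlined_cpus)
        {
                int cpu, err = 0;

                cpumask_clear(offlined_cpus);

                for_each_cpu(cpu, cpus) {
                        int ret = remove_cpu(cpu);

                        if (ret == -EBUSY || ret == -EPERM) {
                                /* Last online CPU, or the Trusted OS resident
                                 * CPU: refusing to go down is expected. */
                                continue;
                        }
                        if (ret) {
                                pr_err("Error %d powering down CPU %d\n", ret, cpu);
                                err = ret;
                                continue;
                        }
                        cpumask_set_cpu(cpu, offlined_cpus);
                }

                for_each_cpu(cpu, offlined_cpus) {
                        int ret = add_cpu(cpu);

                        if (ret) {
                                pr_err("Error %d powering up CPU %d\n", ret, cpu);
                                err = ret;
                                continue;
                        }
                        cpumask_clear_cpu(cpu, offlined_cpus);
                }

                return err;
        }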
156 return -ENOMEM; in alloc_init_cpu_groups()
162 return -ENOMEM; in alloc_init_cpu_groups()
174 return -ENOMEM; in alloc_init_cpu_groups()
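The three -ENOMEM returns above are consistent with alloc_init_cpu_groups() making three kinds of allocations: a scratch cpumask, the array of group masks, and one cpumask per group. A sketch of how such groups can be built from the CPU topology, assuming the core-sibling mask from <linux/topology.h>; this illustrates the allocation/error pattern rather than reproducing the function:

        #include <linux/cpumask.h>
        #include <linux/slab.h>
        #include <linux/topology.h>

        /* Sketch: partition the online CPUs into groups of core siblings,
         * returning the number of groups or a negative error. */
        static int alloc_cpu_groups_sketch(cpumask_var_t **pcpu_groups)
        {
                cpumask_var_t tmp, *cpu_groups;
                int num_groups = 0;

                if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                        return -ENOMEM;

                cpu_groups = kcalloc(num_online_cpus(), sizeof(*cpu_groups),
                                     GFP_KERNEL);
                if (!cpu_groups) {
                        free_cpumask_var(tmp);
                        return -ENOMEM;
                }

                cpumask_copy(tmp, cpu_online_mask);

                while (!cpumask_empty(tmp)) {
                        const struct cpumask *group =
                                topology_core_cpumask(cpumask_any(tmp));

                        if (!alloc_cpumask_var(&cpu_groups[num_groups], GFP_KERNEL)) {
                                while (num_groups--)
                                        free_cpumask_var(cpu_groups[num_groups]);
                                kfree(cpu_groups);
                                free_cpumask_var(tmp);
                                return -ENOMEM;
                        }
                        cpumask_copy(cpu_groups[num_groups++], group);
                        cpumask_andnot(tmp, tmp, group);
                }

                free_cpumask_var(tmp);
                *pcpu_groups = cpu_groups;
                return num_groups;
        }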
188 int i, nb_cpu_group, err = -ENOMEM; in hotplug_tests()
203 * Of course the last CPU cannot be powered down and cpu_down() should in hotplug_tests()
210 * Take down CPUs by cpu group this time. When the last CPU is turned in hotplug_tests()
211 * off, the cpu group itself should shut down. in hotplug_tests()
217 page_buf[len - 1] = '\0'; in hotplug_tests()
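hotplug_tests() apparently runs the down/up loop twice: once over all online CPUs, then once per cpu group (built by alloc_init_cpu_groups(), sketched above). The page_buf[len - 1] = '\0' line strips the trailing newline that cpumap_print_to_pagebuf() appends before the mask is logged. A sketch of that per-group pass, assuming cpumap_print_to_pagebuf() from <linux/cpumask.h> and reusing the down_and_up_cpus_sketch() helper from above; nb_cpu_group and cpu_groups are illustrative parameters:

        #include <linux/cpumask.h>
        #include <linux/gfp.h>
        #include <linux/printk.h>

        /* Sketch: exercise hotplug one topology group at a time, logging each
         * group's CPU list.  @cpu_groups[i] holds the cpumask of group i. */
        static int hotplug_groups_sketch(cpumask_var_t *cpu_groups, int nb_cpu_group,
                                         struct cpumask *offlined_cpus)
        {
                char *page_buf = (char *)__get_free_page(GFP_KERNEL);
                int i, err = 0;

                if (!page_buf)
                        return -ENOMEM;

                for (i = 0; i < nb_cpu_group; ++i) {
                        ssize_t len = cpumap_print_to_pagebuf(true, page_buf,
                                                              cpu_groups[i]);
                        /* cpumap_print_to_pagebuf() terminates the list with '\n'. */
                        page_buf[len - 1] = '\0';
                        pr_info("Trying to turn off and on again group %d (CPUs %s)\n",
                                i, page_buf);
                        err += down_and_up_cpus_sketch(cpu_groups[i], offlined_cpus);
                }

                free_page((unsigned long)page_buf);
                return err;
        }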
236 struct cpuidle_state *state = &drv->states[index]; in suspend_cpu()
237 bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP; in suspend_cpu()
244 * The local timer will be shut down, we need to enter tick in suspend_cpu()
251 * this CPU might be used to broadcast wakeups, which in suspend_cpu()
262 ret = state->enter(dev, drv, index); in suspend_cpu()
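suspend_cpu() wraps the raw cpuidle state entry: when the target state carries CPUIDLE_FLAG_TIMER_STOP the per-CPU timer stops ticking, so the CPU must switch to the broadcast tick before calling state->enter() and switch back afterwards. A minimal sketch of that wrapping, assuming <linux/cpuidle.h> and <linux/tick.h>; unlike the matched code, which (per its comments) falls back to plain WFI when tick_broadcast_enter() fails because this CPU is itself used to broadcast wakeups, the sketch simply returns the error:

        #include <linux/cpuidle.h>
        #include <linux/tick.h>

        /* Sketch: enter cpuidle state @index on this CPU, moving to the
         * broadcast tick first if the state stops the local timer.  Returns
         * the index of the state actually entered, or a negative error. */
        static int suspend_cpu_sketch(struct cpuidle_device *dev,
                                      struct cpuidle_driver *drv, int index)
        {
                struct cpuidle_state *state = &drv->states[index];
                bool broadcast = state->flags & CPUIDLE_FLAG_TIMER_STOP;
                int ret;

                if (broadcast) {
                        /* The local timer will be shut down: hand wakeups over
                         * to the broadcast timer.  This can fail if this CPU is
                         * the broadcast source. */
                        ret = tick_broadcast_enter();
                        if (ret)
                                return ret;
                }

                ret = state->enter(dev, drv, index);

                if (broadcast)
                        tick_broadcast_exit();

                return ret;
        }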
275 int cpu = (long)arg; in suspend_test_thread() local
279 /* No need for an actual callback, we just want to wake up the CPU. */ in suspend_test_thread()
285 /* Set maximum priority to preempt all other threads on this CPU. */ in suspend_test_thread()
291 pr_info("CPU %d entering suspend cycles, states 1 through %d\n", in suspend_test_thread()
292 cpu, drv->state_count - 1); in suspend_test_thread()
301 for (index = 1; index < drv->state_count; ++index) { in suspend_test_thread()
303 struct cpuidle_state *state = &drv->states[index]; in suspend_test_thread()
306 * Set the timer to wake this CPU up in some time (which in suspend_test_thread()
310 * tick, so the timer will still wake us up. in suspend_test_thread()
313 usecs_to_jiffies(state->target_residency)); in suspend_test_thread()
321 * We have woken up. Re-enable IRQs to handle any in suspend_test_thread()
333 pr_err("Failed to suspend CPU %d: error %d " in suspend_test_thread()
335 cpu, ret, index, i); in suspend_test_thread()
342 * Disable the timer to make sure that the timer will not trigger in suspend_test_thread()
359 pr_info("CPU %d suspend test results: success %d, shallow states %d, errors %d\n", in suspend_test_thread()
360 cpu, nb_suspend, nb_shallow_sleep, nb_err); in suspend_test_thread()
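The suspend_test_thread() matches show the per-CPU worker: it raises its priority, arms an on-stack timer whose callback does nothing (the point is only to guarantee a wakeup), cycles through every cpuidle state from 1 to state_count - 1, and tallies successful, shallow and failed entries. The timer idiom looks roughly like the sketch below, assuming the long-standing <linux/timer.h> interface (timer_setup_on_stack()/mod_timer()/del_timer(); recent kernels rename some of these, e.g. timer_delete()) and the suspend_cpu_sketch() helper above; NUM_CYCLES is illustrative and the success/shallow/error bookkeeping is reduced to an error message:

        #include <linux/cpuidle.h>
        #include <linux/irqflags.h>
        #include <linux/jiffies.h>
        #include <linux/printk.h>
        #include <linux/timer.h>

        #define NUM_CYCLES 10   /* illustrative cycle count */

        /* The callback body is irrelevant: the timer only has to wake the CPU. */
        static void wakeup_noop(struct timer_list *unused) { }

        /* Sketch: cycle this CPU through every non-WFI cpuidle state, arming a
         * timer before each entry so the CPU is guaranteed a wakeup. */
        static void suspend_cycles_sketch(struct cpuidle_device *dev,
                                          struct cpuidle_driver *drv)
        {
                struct timer_list wakeup_timer;
                int i, index;

                timer_setup_on_stack(&wakeup_timer, wakeup_noop, 0);

                for (i = 0; i < NUM_CYCLES; ++i) {
                        /* State 0 is usually WFI and does not go through PSCI. */
                        for (index = 1; index < drv->state_count; ++index) {
                                struct cpuidle_state *state = &drv->states[index];
                                int ret;

                                /* Arm a wakeup roughly one target residency from
                                 * now.  If the state stops the local tick,
                                 * suspend_cpu_sketch() has already switched to the
                                 * broadcast tick, so the timer still fires. */
                                mod_timer(&wakeup_timer, jiffies +
                                          usecs_to_jiffies(state->target_residency));

                                local_irq_disable();
                                ret = suspend_cpu_sketch(dev, drv, index);
                                /* We have woken up: re-enable IRQs so the pending
                                 * wakeup interrupt is handled right away. */
                                local_irq_enable();

                                if (ret < 0)
                                        pr_err("suspend to state %d failed: %d\n",
                                               index, ret);
                        }
                }

                /* Make sure the timer cannot fire after this stack frame is gone. */
                del_timer(&wakeup_timer);
                destroy_timer_on_stack(&wakeup_timer);
        }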
369 int i, cpu, err = 0; in suspend_tests() local
376 return -ENOMEM; in suspend_tests()
383 * the cpuidle driver and device look-up can be carried out safely. in suspend_tests()
387 for_each_online_cpu(cpu) { in suspend_tests()
389 /* Check that cpuidle is available on that CPU. */ in suspend_tests()
390 struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu); in suspend_tests()
394 pr_warn("cpuidle not available on CPU %d, ignoring\n", in suspend_tests()
395 cpu); in suspend_tests()
400 (void *)(long)cpu, cpu, in suspend_tests()
403 pr_err("Failed to create kthread on CPU %d\n", cpu); in suspend_tests()
409 err = -ENODEV; in suspend_tests()
416 * Wake up the suspend threads. To avoid the main thread being preempted in suspend_tests()
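suspend_tests() drives the workers above: for every online CPU it looks up the cpuidle device and driver, spawns a test kthread pinned to that CPU, returns -ENODEV if no CPU could be covered, and then wakes all the threads together. A sketch of that spawning loop, assuming kthread_create_on_cpu() from <linux/kthread.h> and the per-CPU cpuidle_devices pointer from <linux/cpuidle.h>; the completion-based start/stop synchronization and the final teardown of the threads are omitted here, and the thread-function parameter is illustrative:

        #include <linux/cpuidle.h>
        #include <linux/err.h>
        #include <linux/kthread.h>
        #include <linux/percpu.h>
        #include <linux/printk.h>
        #include <linux/sched.h>
        #include <linux/slab.h>

        /* Sketch: start one pinned test kthread per online CPU that has a usable
         * cpuidle device, then kick them all off.  @thread_fn runs the per-CPU
         * suspend cycles and receives its CPU number as the argument. */
        static int start_suspend_threads_sketch(int (*thread_fn)(void *))
        {
                struct task_struct **threads;
                int cpu, nb_threads = 0, i;

                threads = kmalloc_array(num_online_cpus(), sizeof(*threads),
                                        GFP_KERNEL);
                if (!threads)
                        return -ENOMEM;

                for_each_online_cpu(cpu) {
                        struct task_struct *thread;
                        /* Check that cpuidle is available on that CPU. */
                        struct cpuidle_device *dev = per_cpu(cpuidle_devices, cpu);
                        struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);

                        if (!dev || !drv) {
                                pr_warn("cpuidle not available on CPU %d, ignoring\n",
                                        cpu);
                                continue;
                        }

                        thread = kthread_create_on_cpu(thread_fn, (void *)(long)cpu,
                                                       cpu, "psci_suspend_test");
                        if (IS_ERR(thread)) {
                                pr_err("Failed to create kthread on CPU %d\n", cpu);
                                continue;
                        }
                        threads[nb_threads++] = thread;
                }

                if (!nb_threads) {
                        kfree(threads);
                        return -ENODEV;
                }

                /* kthread_create_on_cpu() leaves the threads created but not yet
                 * running; wake them all once everything is set up. */
                for (i = 0; i < nb_threads; ++i)
                        wake_up_process(threads[i]);

                kfree(threads);
                return 0;
        }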
480 case -ENOMEM: in psci_checker()
483 case -ENODEV: in psci_checker()
484 pr_warn("Could not start suspend tests on any CPU\n"); in psci_checker()