// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_driver.c - ACPI Processor Driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                    - Added processor hotplug support
 * Copyright (C) 2013, Intel Corporation
 *                     Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/slab.h>
#include <linux/acpi.h>

#include <acpi/processor.h>

#include "internal.h"

#define ACPI_PROCESSOR_NOTIFY_PERFORMANCE          0x80
#define ACPI_PROCESSOR_NOTIFY_POWER                0x81
#define ACPI_PROCESSOR_NOTIFY_THROTTLING           0x82
#define ACPI_PROCESSOR_NOTIFY_HIGHEST_PERF_CHANGED 0x85

MODULE_AUTHOR("Paul Diefenbaugh");
MODULE_DESCRIPTION("ACPI Processor Driver");
MODULE_LICENSE("GPL");

static int acpi_processor_stop(struct device *dev);

static const struct acpi_device_id processor_device_ids[] = {
        {ACPI_PROCESSOR_OBJECT_HID, 0},
        {ACPI_PROCESSOR_DEVICE_HID, 0},
        {"", 0},
};
MODULE_DEVICE_TABLE(acpi, processor_device_ids);

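/*
 * The processor driver is registered on the CPU subsystem bus and matched
 * against the ACPI processor HIDs above; acpi_processor_stop() runs when a
 * CPU device is removed.
 */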
static struct device_driver acpi_processor_driver = {
        .name = "processor",
        .bus = &cpu_subsys,
        .acpi_match_table = processor_device_ids,
        .remove = acpi_processor_stop,
};

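/*
 * Handle ACPI notifications on a processor device: performance limit,
 * power state, throttling and highest-performance changes are forwarded
 * to the respective subsystems and reported via a netlink event.
 */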
static void acpi_processor_notify(acpi_handle handle, u32 event, void *data)
{
        struct acpi_device *device = data;
        struct acpi_processor *pr;
        int saved;

        if (device->handle != handle)
                return;

        pr = acpi_driver_data(device);
        if (!pr)
                return;

        switch (event) {
        case ACPI_PROCESSOR_NOTIFY_PERFORMANCE:
                saved = pr->performance_platform_limit;
                acpi_processor_ppc_has_changed(pr, 1);
                if (saved == pr->performance_platform_limit)
                        break;
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                dev_name(&device->dev), event,
                                                pr->performance_platform_limit);
                break;
        case ACPI_PROCESSOR_NOTIFY_POWER:
                acpi_processor_power_state_has_changed(pr);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                dev_name(&device->dev), event, 0);
                break;
        case ACPI_PROCESSOR_NOTIFY_THROTTLING:
                acpi_processor_tstate_has_changed(pr);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                dev_name(&device->dev), event, 0);
                break;
        case ACPI_PROCESSOR_NOTIFY_HIGHEST_PERF_CHANGED:
                cpufreq_update_limits(pr->id);
                acpi_bus_generate_netlink_event(device->pnp.device_class,
                                                dev_name(&device->dev), event, 0);
                break;
        default:
                acpi_handle_debug(handle, "Unsupported event [0x%x]\n", event);
                break;
        }
}

static int __acpi_processor_start(struct acpi_device *device);

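/*
 * CPU hotplug "online" callback: complete driver initialization the first
 * time a physically hotplugged CPU comes online; on a normal soft online,
 * re-evaluate the performance limit, C-states and T-states for the CPU.
 */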
static int acpi_soft_cpu_online(unsigned int cpu)
{
        struct acpi_processor *pr = per_cpu(processors, cpu);
        struct acpi_device *device;

        if (!pr)
                return 0;

        device = acpi_fetch_acpi_dev(pr->handle);
        if (!device)
                return 0;

        /*
         * CPU got physically hotplugged and onlined for the first time:
         * Initialize missing things.
         */
        if (!pr->flags.previously_online) {
                int ret;

                ret = __acpi_processor_start(device);
                WARN(ret, "Failed to start CPU: %d\n", pr->id);
        } else {
                /* Normal CPU soft online event. */
                acpi_processor_ppc_has_changed(pr, 0);
                acpi_processor_hotplug(pr);
                acpi_processor_reevaluate_tstate(pr, false);
                acpi_processor_tstate_has_changed(pr);
        }
        return 0;
}

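/* CPU hotplug "dead" callback: re-evaluate T-states for the offlined CPU. */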
static int acpi_soft_cpu_dead(unsigned int cpu)
{
        struct acpi_processor *pr = per_cpu(processors, cpu);

        if (!pr || !acpi_fetch_acpi_dev(pr->handle))
                return 0;

        acpi_processor_reevaluate_tstate(pr, true);
        return 0;
}

#ifdef CONFIG_ACPI_CPU_FREQ_PSS
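/*
 * Set up _PPC and throttling handling for a CPU when PSS-based
 * performance control is enabled.
 */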
static void acpi_pss_perf_init(struct acpi_processor *pr)
{
        acpi_processor_ppc_has_changed(pr, 0);

        acpi_processor_get_throttling_info(pr);

        if (pr->flags.throttling)
                pr->flags.limit = 1;
}
#else
static inline void acpi_pss_perf_init(struct acpi_processor *pr) {}
#endif /* CONFIG_ACPI_CPU_FREQ_PSS */

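/*
 * Bring up the per-CPU parts of the driver: CPPC data, idle power states,
 * PSS performance and throttling data, the thermal cooling device and the
 * ACPI notify handler. Partially initialized state is unwound on failure.
 */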
static int __acpi_processor_start(struct acpi_device *device)
{
        struct acpi_processor *pr = acpi_driver_data(device);
        acpi_status status;
        int result = 0;

        if (!pr)
                return -ENODEV;

        result = acpi_cppc_processor_probe(pr);
        if (result && !IS_ENABLED(CONFIG_ACPI_CPU_FREQ_PSS))
                dev_dbg(&device->dev, "CPPC data invalid or not present\n");

        if (!cpuidle_get_driver() || cpuidle_get_driver() == &acpi_idle_driver)
                acpi_processor_power_init(pr);

        acpi_pss_perf_init(pr);

        result = acpi_processor_thermal_init(pr, device);
        if (result)
                goto err_power_exit;

        status = acpi_install_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
                                             acpi_processor_notify, device);
        if (ACPI_FAILURE(status)) {
                result = -ENODEV;
                goto err_thermal_exit;
        }
        pr->flags.previously_online = 1;

        return 0;

err_thermal_exit:
        acpi_processor_thermal_exit(pr, device);
err_power_exit:
        acpi_processor_power_exit(pr);
        return result;
}

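/*
 * Undo __acpi_processor_start(): remove the notify handler and tear down
 * power, CPPC and thermal handling for the departing CPU device.
 */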
static int acpi_processor_stop(struct device *dev)
{
        struct acpi_device *device = ACPI_COMPANION(dev);
        struct acpi_processor *pr;

        if (!device)
                return 0;

        acpi_remove_notify_handler(device->handle, ACPI_DEVICE_NOTIFY,
                                   acpi_processor_notify);

        pr = acpi_driver_data(device);
        if (!pr)
                return 0;

        acpi_processor_power_exit(pr);

        acpi_cppc_processor_exit(pr);

        acpi_processor_thermal_exit(pr, device);

        return 0;
}

bool acpi_processor_cpufreq_init;

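/*
 * cpufreq policy notifier: wire up thermal cooling and _PPC limit handling
 * when a policy is created and remove them when the policy goes away.
 */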
static int acpi_processor_notifier(struct notifier_block *nb,
                                   unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;

        if (event == CPUFREQ_CREATE_POLICY) {
                acpi_thermal_cpufreq_init(policy);
                acpi_processor_ppc_init(policy);
        } else if (event == CPUFREQ_REMOVE_POLICY) {
                acpi_processor_ppc_exit(policy);
                acpi_thermal_cpufreq_exit(policy);
        }

        return 0;
}

static struct notifier_block acpi_processor_notifier_block = {
        .notifier_call = acpi_processor_notifier,
};

void __weak acpi_processor_init_invariance_cppc(void)
{ }

/*
 * We keep the driver loaded even when ACPI is not running.
 * This is needed for the powernow-k8 driver, which works even without
 * ACPI, but needs symbols from this driver.
 */
static enum cpuhp_state hp_online;
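
/*
 * Register the driver, the cpufreq policy notifier and the CPU hotplug
 * callbacks, then initialize throttling and CPPC-based frequency invariance.
 */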
static int __init acpi_processor_driver_init(void)
{
        int result = 0;

        if (acpi_disabled)
                return 0;

        if (!cpufreq_register_notifier(&acpi_processor_notifier_block,
                                       CPUFREQ_POLICY_NOTIFIER)) {
                acpi_processor_cpufreq_init = true;
                acpi_processor_ignore_ppc_init();
        }

        result = driver_register(&acpi_processor_driver);
        if (result < 0)
                return result;

        result = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
                                   "acpi/cpu-drv:online",
                                   acpi_soft_cpu_online, NULL);
        if (result < 0)
                goto err;
        hp_online = result;
        cpuhp_setup_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD, "acpi/cpu-drv:dead",
                                  NULL, acpi_soft_cpu_dead);

        acpi_processor_throttling_init();

        /*
         * Frequency invariance calculations on AMD platforms can't be run
         * until after acpi_cppc_processor_probe() has been called for all
         * online CPUs.
         */
        acpi_processor_init_invariance_cppc();
        return 0;
err:
        driver_unregister(&acpi_processor_driver);
        return result;
}

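/* Unregister everything that acpi_processor_driver_init() set up. */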
static void __exit acpi_processor_driver_exit(void)
{
        if (acpi_disabled)
                return;

        if (acpi_processor_cpufreq_init) {
                cpufreq_unregister_notifier(&acpi_processor_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);
                acpi_processor_cpufreq_init = false;
        }

        cpuhp_remove_state_nocalls(hp_online);
        cpuhp_remove_state_nocalls(CPUHP_ACPI_CPUDRV_DEAD);
        driver_unregister(&acpi_processor_driver);
}

module_init(acpi_processor_driver_init);
module_exit(acpi_processor_driver_exit);

MODULE_ALIAS("processor");