1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   *  linux/drivers/cpufreq/cpufreq.c
4   *
5   *  Copyright (C) 2001 Russell King
6   *            (C) 2002 - 2003 Dominik Brodowski <linux@brodo.de>
7   *            (C) 2013 Viresh Kumar <viresh.kumar@linaro.org>
8   *
9   *  Oct 2005 - Ashok Raj <ashok.raj@intel.com>
10   *	Added handling for CPU hotplug
11   *  Feb 2006 - Jacob Shin <jacob.shin@amd.com>
12   *	Fix handling for CPU hotplug -- affected CPUs
13   */
14  
15  #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16  
17  #include <linux/cpu.h>
18  #include <linux/cpufreq.h>
19  #include <linux/cpu_cooling.h>
20  #include <linux/delay.h>
21  #include <linux/device.h>
22  #include <linux/init.h>
23  #include <linux/kernel_stat.h>
24  #include <linux/module.h>
25  #include <linux/mutex.h>
26  #include <linux/pm_qos.h>
27  #include <linux/slab.h>
28  #include <linux/suspend.h>
29  #include <linux/syscore_ops.h>
30  #include <linux/tick.h>
31  #include <linux/units.h>
32  #include <trace/events/power.h>
33  
34  static LIST_HEAD(cpufreq_policy_list);
35  
36  /* Macros to iterate over CPU policies */
37  #define for_each_suitable_policy(__policy, __active)			 \
38  	list_for_each_entry(__policy, &cpufreq_policy_list, policy_list) \
39  		if ((__active) == !policy_is_inactive(__policy))
40  
41  #define for_each_active_policy(__policy)		\
42  	for_each_suitable_policy(__policy, true)
43  #define for_each_inactive_policy(__policy)		\
44  	for_each_suitable_policy(__policy, false)
45  
46  /* Iterate over governors */
47  static LIST_HEAD(cpufreq_governor_list);
48  #define for_each_governor(__governor)				\
49  	list_for_each_entry(__governor, &cpufreq_governor_list, governor_list)
50  
51  static char default_governor[CPUFREQ_NAME_LEN];
52  
53  /*
54   * The "cpufreq driver" - the arch- or hardware-dependent low
55   * level driver of CPUFreq support, and its spinlock. This lock
56   * also protects the cpufreq_cpu_data array.
57   */
58  static struct cpufreq_driver *cpufreq_driver;
59  static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
60  static DEFINE_RWLOCK(cpufreq_driver_lock);
61  
62  static DEFINE_STATIC_KEY_FALSE(cpufreq_freq_invariance);
cpufreq_supports_freq_invariance(void)63  bool cpufreq_supports_freq_invariance(void)
64  {
65  	return static_branch_likely(&cpufreq_freq_invariance);
66  }
67  
68  /* Flag to suspend/resume CPUFreq governors */
69  static bool cpufreq_suspended;
70  
has_target(void)71  static inline bool has_target(void)
72  {
73  	return cpufreq_driver->target_index || cpufreq_driver->target;
74  }
75  
has_target_index(void)76  bool has_target_index(void)
77  {
78  	return !!cpufreq_driver->target_index;
79  }
80  
81  /* internal prototypes */
82  static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
83  static int cpufreq_init_governor(struct cpufreq_policy *policy);
84  static void cpufreq_exit_governor(struct cpufreq_policy *policy);
85  static void cpufreq_governor_limits(struct cpufreq_policy *policy);
86  static int cpufreq_set_policy(struct cpufreq_policy *policy,
87  			      struct cpufreq_governor *new_gov,
88  			      unsigned int new_pol);
89  static bool cpufreq_boost_supported(void);
90  
91  /*
92   * Two notifier lists: the "policy" list is involved in the
93   * validation process for a new CPU frequency policy; the
94   * "transition" list for kernel code that needs to handle
95   * changes to devices when the CPU clock speed changes.
96   * The mutex locks both lists.
97   */
98  static BLOCKING_NOTIFIER_HEAD(cpufreq_policy_notifier_list);
99  SRCU_NOTIFIER_HEAD_STATIC(cpufreq_transition_notifier_list);
100  
101  static int off __read_mostly;
cpufreq_disabled(void)102  static int cpufreq_disabled(void)
103  {
104  	return off;
105  }
disable_cpufreq(void)106  void disable_cpufreq(void)
107  {
108  	off = 1;
109  }
110  static DEFINE_MUTEX(cpufreq_governor_mutex);
111  
have_governor_per_policy(void)112  bool have_governor_per_policy(void)
113  {
114  	return !!(cpufreq_driver->flags & CPUFREQ_HAVE_GOVERNOR_PER_POLICY);
115  }
116  EXPORT_SYMBOL_GPL(have_governor_per_policy);
117  
118  static struct kobject *cpufreq_global_kobject;
119  
get_governor_parent_kobj(struct cpufreq_policy * policy)120  struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
121  {
122  	if (have_governor_per_policy())
123  		return &policy->kobj;
124  	else
125  		return cpufreq_global_kobject;
126  }
127  EXPORT_SYMBOL_GPL(get_governor_parent_kobj);
128  
get_cpu_idle_time_jiffy(unsigned int cpu,u64 * wall)129  static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
130  {
131  	struct kernel_cpustat kcpustat;
132  	u64 cur_wall_time;
133  	u64 idle_time;
134  	u64 busy_time;
135  
136  	cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
137  
138  	kcpustat_cpu_fetch(&kcpustat, cpu);
139  
140  	busy_time = kcpustat.cpustat[CPUTIME_USER];
141  	busy_time += kcpustat.cpustat[CPUTIME_SYSTEM];
142  	busy_time += kcpustat.cpustat[CPUTIME_IRQ];
143  	busy_time += kcpustat.cpustat[CPUTIME_SOFTIRQ];
144  	busy_time += kcpustat.cpustat[CPUTIME_STEAL];
145  	busy_time += kcpustat.cpustat[CPUTIME_NICE];
146  
147  	idle_time = cur_wall_time - busy_time;
148  	if (wall)
149  		*wall = div_u64(cur_wall_time, NSEC_PER_USEC);
150  
151  	return div_u64(idle_time, NSEC_PER_USEC);
152  }
153  
get_cpu_idle_time(unsigned int cpu,u64 * wall,int io_busy)154  u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
155  {
156  	u64 idle_time = get_cpu_idle_time_us(cpu, io_busy ? wall : NULL);
157  
158  	if (idle_time == -1ULL)
159  		return get_cpu_idle_time_jiffy(cpu, wall);
160  	else if (!io_busy)
161  		idle_time += get_cpu_iowait_time_us(cpu, wall);
162  
163  	return idle_time;
164  }
165  EXPORT_SYMBOL_GPL(get_cpu_idle_time);
166  
167  /*
168   * This is a generic cpufreq init() routine which can be used by cpufreq
169   * drivers of SMP systems. It will do following:
170   * - validate & show freq table passed
171   * - set policies transition latency
172   * - policy->cpus with all possible CPUs
173   */
cpufreq_generic_init(struct cpufreq_policy * policy,struct cpufreq_frequency_table * table,unsigned int transition_latency)174  void cpufreq_generic_init(struct cpufreq_policy *policy,
175  		struct cpufreq_frequency_table *table,
176  		unsigned int transition_latency)
177  {
178  	policy->freq_table = table;
179  	policy->cpuinfo.transition_latency = transition_latency;
180  
181  	/*
182  	 * The driver only supports the SMP configuration where all processors
183  	 * share the clock and voltage and clock.
184  	 */
185  	cpumask_setall(policy->cpus);
186  }
187  EXPORT_SYMBOL_GPL(cpufreq_generic_init);
188  
cpufreq_cpu_get_raw(unsigned int cpu)189  struct cpufreq_policy *cpufreq_cpu_get_raw(unsigned int cpu)
190  {
191  	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
192  
193  	return policy && cpumask_test_cpu(cpu, policy->cpus) ? policy : NULL;
194  }
195  EXPORT_SYMBOL_GPL(cpufreq_cpu_get_raw);
196  
cpufreq_generic_get(unsigned int cpu)197  unsigned int cpufreq_generic_get(unsigned int cpu)
198  {
199  	struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
200  
201  	if (!policy || IS_ERR(policy->clk)) {
202  		pr_err("%s: No %s associated to cpu: %d\n",
203  		       __func__, policy ? "clk" : "policy", cpu);
204  		return 0;
205  	}
206  
207  	return clk_get_rate(policy->clk) / 1000;
208  }
209  EXPORT_SYMBOL_GPL(cpufreq_generic_get);
210  
211  /**
212   * cpufreq_cpu_get - Return policy for a CPU and mark it as busy.
213   * @cpu: CPU to find the policy for.
214   *
215   * Call cpufreq_cpu_get_raw() to obtain a cpufreq policy for @cpu and increment
216   * the kobject reference counter of that policy.  Return a valid policy on
217   * success or NULL on failure.
218   *
219   * The policy returned by this function has to be released with the help of
220   * cpufreq_cpu_put() to balance its kobject reference counter properly.
221   */
cpufreq_cpu_get(unsigned int cpu)222  struct cpufreq_policy *cpufreq_cpu_get(unsigned int cpu)
223  {
224  	struct cpufreq_policy *policy = NULL;
225  	unsigned long flags;
226  
227  	if (WARN_ON(cpu >= nr_cpu_ids))
228  		return NULL;
229  
230  	/* get the cpufreq driver */
231  	read_lock_irqsave(&cpufreq_driver_lock, flags);
232  
233  	if (cpufreq_driver) {
234  		/* get the CPU */
235  		policy = cpufreq_cpu_get_raw(cpu);
236  		if (policy)
237  			kobject_get(&policy->kobj);
238  	}
239  
240  	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
241  
242  	return policy;
243  }
244  EXPORT_SYMBOL_GPL(cpufreq_cpu_get);
245  
246  /**
247   * cpufreq_cpu_put - Decrement kobject usage counter for cpufreq policy.
248   * @policy: cpufreq policy returned by cpufreq_cpu_get().
249   */
cpufreq_cpu_put(struct cpufreq_policy * policy)250  void cpufreq_cpu_put(struct cpufreq_policy *policy)
251  {
252  	kobject_put(&policy->kobj);
253  }
254  EXPORT_SYMBOL_GPL(cpufreq_cpu_put);
255  
256  /**
257   * cpufreq_cpu_release - Unlock a policy and decrement its usage counter.
258   * @policy: cpufreq policy returned by cpufreq_cpu_acquire().
259   */
cpufreq_cpu_release(struct cpufreq_policy * policy)260  void cpufreq_cpu_release(struct cpufreq_policy *policy)
261  {
262  	if (WARN_ON(!policy))
263  		return;
264  
265  	lockdep_assert_held(&policy->rwsem);
266  
267  	up_write(&policy->rwsem);
268  
269  	cpufreq_cpu_put(policy);
270  }
271  
272  /**
273   * cpufreq_cpu_acquire - Find policy for a CPU, mark it as busy and lock it.
274   * @cpu: CPU to find the policy for.
275   *
276   * Call cpufreq_cpu_get() to get a reference on the cpufreq policy for @cpu and
277   * if the policy returned by it is not NULL, acquire its rwsem for writing.
278   * Return the policy if it is active or release it and return NULL otherwise.
279   *
280   * The policy returned by this function has to be released with the help of
281   * cpufreq_cpu_release() in order to release its rwsem and balance its usage
282   * counter properly.
283   */
cpufreq_cpu_acquire(unsigned int cpu)284  struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu)
285  {
286  	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
287  
288  	if (!policy)
289  		return NULL;
290  
291  	down_write(&policy->rwsem);
292  
293  	if (policy_is_inactive(policy)) {
294  		cpufreq_cpu_release(policy);
295  		return NULL;
296  	}
297  
298  	return policy;
299  }
300  
301  /*********************************************************************
302   *            EXTERNALLY AFFECTING FREQUENCY CHANGES                 *
303   *********************************************************************/
304  
305  /**
306   * adjust_jiffies - Adjust the system "loops_per_jiffy".
307   * @val: CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
308   * @ci: Frequency change information.
309   *
310   * This function alters the system "loops_per_jiffy" for the clock
311   * speed change. Note that loops_per_jiffy cannot be updated on SMP
312   * systems as each CPU might be scaled differently. So, use the arch
313   * per-CPU loops_per_jiffy value wherever possible.
314   */
adjust_jiffies(unsigned long val,struct cpufreq_freqs * ci)315  static void adjust_jiffies(unsigned long val, struct cpufreq_freqs *ci)
316  {
317  #ifndef CONFIG_SMP
318  	static unsigned long l_p_j_ref;
319  	static unsigned int l_p_j_ref_freq;
320  
321  	if (ci->flags & CPUFREQ_CONST_LOOPS)
322  		return;
323  
324  	if (!l_p_j_ref_freq) {
325  		l_p_j_ref = loops_per_jiffy;
326  		l_p_j_ref_freq = ci->old;
327  		pr_debug("saving %lu as reference value for loops_per_jiffy; freq is %u kHz\n",
328  			 l_p_j_ref, l_p_j_ref_freq);
329  	}
330  	if (val == CPUFREQ_POSTCHANGE && ci->old != ci->new) {
331  		loops_per_jiffy = cpufreq_scale(l_p_j_ref, l_p_j_ref_freq,
332  								ci->new);
333  		pr_debug("scaling loops_per_jiffy to %lu for frequency %u kHz\n",
334  			 loops_per_jiffy, ci->new);
335  	}
336  #endif
337  }
338  
339  /**
340   * cpufreq_notify_transition - Notify frequency transition and adjust jiffies.
341   * @policy: cpufreq policy to enable fast frequency switching for.
342   * @freqs: contain details of the frequency update.
343   * @state: set to CPUFREQ_PRECHANGE or CPUFREQ_POSTCHANGE.
344   *
345   * This function calls the transition notifiers and adjust_jiffies().
346   *
347   * It is called twice on all CPU frequency changes that have external effects.
348   */
cpufreq_notify_transition(struct cpufreq_policy * policy,struct cpufreq_freqs * freqs,unsigned int state)349  static void cpufreq_notify_transition(struct cpufreq_policy *policy,
350  				      struct cpufreq_freqs *freqs,
351  				      unsigned int state)
352  {
353  	int cpu;
354  
355  	BUG_ON(irqs_disabled());
356  
357  	if (cpufreq_disabled())
358  		return;
359  
360  	freqs->policy = policy;
361  	freqs->flags = cpufreq_driver->flags;
362  	pr_debug("notification %u of frequency transition to %u kHz\n",
363  		 state, freqs->new);
364  
365  	switch (state) {
366  	case CPUFREQ_PRECHANGE:
367  		/*
368  		 * Detect if the driver reported a value as "old frequency"
369  		 * which is not equal to what the cpufreq core thinks is
370  		 * "old frequency".
371  		 */
372  		if (policy->cur && policy->cur != freqs->old) {
373  			pr_debug("Warning: CPU frequency is %u, cpufreq assumed %u kHz\n",
374  				 freqs->old, policy->cur);
375  			freqs->old = policy->cur;
376  		}
377  
378  		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
379  					 CPUFREQ_PRECHANGE, freqs);
380  
381  		adjust_jiffies(CPUFREQ_PRECHANGE, freqs);
382  		break;
383  
384  	case CPUFREQ_POSTCHANGE:
385  		adjust_jiffies(CPUFREQ_POSTCHANGE, freqs);
386  		pr_debug("FREQ: %u - CPUs: %*pbl\n", freqs->new,
387  			 cpumask_pr_args(policy->cpus));
388  
389  		for_each_cpu(cpu, policy->cpus)
390  			trace_cpu_frequency(freqs->new, cpu);
391  
392  		srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
393  					 CPUFREQ_POSTCHANGE, freqs);
394  
395  		cpufreq_stats_record_transition(policy, freqs->new);
396  		policy->cur = freqs->new;
397  	}
398  }
399  
400  /* Do post notifications when there are chances that transition has failed */
cpufreq_notify_post_transition(struct cpufreq_policy * policy,struct cpufreq_freqs * freqs,int transition_failed)401  static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
402  		struct cpufreq_freqs *freqs, int transition_failed)
403  {
404  	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
405  	if (!transition_failed)
406  		return;
407  
408  	swap(freqs->old, freqs->new);
409  	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
410  	cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
411  }
412  
cpufreq_freq_transition_begin(struct cpufreq_policy * policy,struct cpufreq_freqs * freqs)413  void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
414  		struct cpufreq_freqs *freqs)
415  {
416  
417  	/*
418  	 * Catch double invocations of _begin() which lead to self-deadlock.
419  	 * ASYNC_NOTIFICATION drivers are left out because the cpufreq core
420  	 * doesn't invoke _begin() on their behalf, and hence the chances of
421  	 * double invocations are very low. Moreover, there are scenarios
422  	 * where these checks can emit false-positive warnings in these
423  	 * drivers; so we avoid that by skipping them altogether.
424  	 */
425  	WARN_ON(!(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION)
426  				&& current == policy->transition_task);
427  
428  wait:
429  	wait_event(policy->transition_wait, !policy->transition_ongoing);
430  
431  	spin_lock(&policy->transition_lock);
432  
433  	if (unlikely(policy->transition_ongoing)) {
434  		spin_unlock(&policy->transition_lock);
435  		goto wait;
436  	}
437  
438  	policy->transition_ongoing = true;
439  	policy->transition_task = current;
440  
441  	spin_unlock(&policy->transition_lock);
442  
443  	cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
444  }
445  EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
446  
cpufreq_freq_transition_end(struct cpufreq_policy * policy,struct cpufreq_freqs * freqs,int transition_failed)447  void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
448  		struct cpufreq_freqs *freqs, int transition_failed)
449  {
450  	if (WARN_ON(!policy->transition_ongoing))
451  		return;
452  
453  	cpufreq_notify_post_transition(policy, freqs, transition_failed);
454  
455  	arch_set_freq_scale(policy->related_cpus,
456  			    policy->cur,
457  			    arch_scale_freq_ref(policy->cpu));
458  
459  	spin_lock(&policy->transition_lock);
460  	policy->transition_ongoing = false;
461  	policy->transition_task = NULL;
462  	spin_unlock(&policy->transition_lock);
463  
464  	wake_up(&policy->transition_wait);
465  }
466  EXPORT_SYMBOL_GPL(cpufreq_freq_transition_end);
467  
468  /*
469   * Fast frequency switching status count.  Positive means "enabled", negative
470   * means "disabled" and 0 means "not decided yet".
471   */
472  static int cpufreq_fast_switch_count;
473  static DEFINE_MUTEX(cpufreq_fast_switch_lock);
474  
cpufreq_list_transition_notifiers(void)475  static void cpufreq_list_transition_notifiers(void)
476  {
477  	struct notifier_block *nb;
478  
479  	pr_info("Registered transition notifiers:\n");
480  
481  	mutex_lock(&cpufreq_transition_notifier_list.mutex);
482  
483  	for (nb = cpufreq_transition_notifier_list.head; nb; nb = nb->next)
484  		pr_info("%pS\n", nb->notifier_call);
485  
486  	mutex_unlock(&cpufreq_transition_notifier_list.mutex);
487  }
488  
489  /**
490   * cpufreq_enable_fast_switch - Enable fast frequency switching for policy.
491   * @policy: cpufreq policy to enable fast frequency switching for.
492   *
493   * Try to enable fast frequency switching for @policy.
494   *
495   * The attempt will fail if there is at least one transition notifier registered
496   * at this point, as fast frequency switching is quite fundamentally at odds
497   * with transition notifiers.  Thus if successful, it will make registration of
498   * transition notifiers fail going forward.
499   */
cpufreq_enable_fast_switch(struct cpufreq_policy * policy)500  void cpufreq_enable_fast_switch(struct cpufreq_policy *policy)
501  {
502  	lockdep_assert_held(&policy->rwsem);
503  
504  	if (!policy->fast_switch_possible)
505  		return;
506  
507  	mutex_lock(&cpufreq_fast_switch_lock);
508  	if (cpufreq_fast_switch_count >= 0) {
509  		cpufreq_fast_switch_count++;
510  		policy->fast_switch_enabled = true;
511  	} else {
512  		pr_warn("CPU%u: Fast frequency switching not enabled\n",
513  			policy->cpu);
514  		cpufreq_list_transition_notifiers();
515  	}
516  	mutex_unlock(&cpufreq_fast_switch_lock);
517  }
518  EXPORT_SYMBOL_GPL(cpufreq_enable_fast_switch);
519  
520  /**
521   * cpufreq_disable_fast_switch - Disable fast frequency switching for policy.
522   * @policy: cpufreq policy to disable fast frequency switching for.
523   */
cpufreq_disable_fast_switch(struct cpufreq_policy * policy)524  void cpufreq_disable_fast_switch(struct cpufreq_policy *policy)
525  {
526  	mutex_lock(&cpufreq_fast_switch_lock);
527  	if (policy->fast_switch_enabled) {
528  		policy->fast_switch_enabled = false;
529  		if (!WARN_ON(cpufreq_fast_switch_count <= 0))
530  			cpufreq_fast_switch_count--;
531  	}
532  	mutex_unlock(&cpufreq_fast_switch_lock);
533  }
534  EXPORT_SYMBOL_GPL(cpufreq_disable_fast_switch);
535  
__resolve_freq(struct cpufreq_policy * policy,unsigned int target_freq,unsigned int relation)536  static unsigned int __resolve_freq(struct cpufreq_policy *policy,
537  		unsigned int target_freq, unsigned int relation)
538  {
539  	unsigned int idx;
540  
541  	target_freq = clamp_val(target_freq, policy->min, policy->max);
542  
543  	if (!policy->freq_table)
544  		return target_freq;
545  
546  	idx = cpufreq_frequency_table_target(policy, target_freq, relation);
547  	policy->cached_resolved_idx = idx;
548  	policy->cached_target_freq = target_freq;
549  	return policy->freq_table[idx].frequency;
550  }
551  
552  /**
553   * cpufreq_driver_resolve_freq - Map a target frequency to a driver-supported
554   * one.
555   * @policy: associated policy to interrogate
556   * @target_freq: target frequency to resolve.
557   *
558   * The target to driver frequency mapping is cached in the policy.
559   *
560   * Return: Lowest driver-supported frequency greater than or equal to the
561   * given target_freq, subject to policy (min/max) and driver limitations.
562   */
cpufreq_driver_resolve_freq(struct cpufreq_policy * policy,unsigned int target_freq)563  unsigned int cpufreq_driver_resolve_freq(struct cpufreq_policy *policy,
564  					 unsigned int target_freq)
565  {
566  	return __resolve_freq(policy, target_freq, CPUFREQ_RELATION_LE);
567  }
568  EXPORT_SYMBOL_GPL(cpufreq_driver_resolve_freq);
569  
cpufreq_policy_transition_delay_us(struct cpufreq_policy * policy)570  unsigned int cpufreq_policy_transition_delay_us(struct cpufreq_policy *policy)
571  {
572  	unsigned int latency;
573  
574  	if (policy->transition_delay_us)
575  		return policy->transition_delay_us;
576  
577  	latency = policy->cpuinfo.transition_latency / NSEC_PER_USEC;
578  	if (latency)
579  		/* Give a 50% breathing room between updates */
580  		return latency + (latency >> 1);
581  
582  	return USEC_PER_MSEC;
583  }
584  EXPORT_SYMBOL_GPL(cpufreq_policy_transition_delay_us);
585  
586  /*********************************************************************
587   *                          SYSFS INTERFACE                          *
588   *********************************************************************/
show_boost(struct kobject * kobj,struct kobj_attribute * attr,char * buf)589  static ssize_t show_boost(struct kobject *kobj,
590  			  struct kobj_attribute *attr, char *buf)
591  {
592  	return sysfs_emit(buf, "%d\n", cpufreq_driver->boost_enabled);
593  }
594  
store_boost(struct kobject * kobj,struct kobj_attribute * attr,const char * buf,size_t count)595  static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
596  			   const char *buf, size_t count)
597  {
598  	bool enable;
599  
600  	if (kstrtobool(buf, &enable))
601  		return -EINVAL;
602  
603  	if (cpufreq_boost_trigger_state(enable)) {
604  		pr_err("%s: Cannot %s BOOST!\n",
605  		       __func__, enable ? "enable" : "disable");
606  		return -EINVAL;
607  	}
608  
609  	pr_debug("%s: cpufreq BOOST %s\n",
610  		 __func__, enable ? "enabled" : "disabled");
611  
612  	return count;
613  }
614  define_one_global_rw(boost);
615  
show_local_boost(struct cpufreq_policy * policy,char * buf)616  static ssize_t show_local_boost(struct cpufreq_policy *policy, char *buf)
617  {
618  	return sysfs_emit(buf, "%d\n", policy->boost_enabled);
619  }
620  
store_local_boost(struct cpufreq_policy * policy,const char * buf,size_t count)621  static ssize_t store_local_boost(struct cpufreq_policy *policy,
622  				 const char *buf, size_t count)
623  {
624  	int ret;
625  	bool enable;
626  
627  	if (kstrtobool(buf, &enable))
628  		return -EINVAL;
629  
630  	if (!cpufreq_driver->boost_enabled)
631  		return -EINVAL;
632  
633  	if (policy->boost_enabled == enable)
634  		return count;
635  
636  	policy->boost_enabled = enable;
637  
638  	cpus_read_lock();
639  	ret = cpufreq_driver->set_boost(policy, enable);
640  	cpus_read_unlock();
641  
642  	if (ret) {
643  		policy->boost_enabled = !policy->boost_enabled;
644  		return ret;
645  	}
646  
647  	return count;
648  }
649  
650  static struct freq_attr local_boost = __ATTR(boost, 0644, show_local_boost, store_local_boost);
651  
find_governor(const char * str_governor)652  static struct cpufreq_governor *find_governor(const char *str_governor)
653  {
654  	struct cpufreq_governor *t;
655  
656  	for_each_governor(t)
657  		if (!strncasecmp(str_governor, t->name, CPUFREQ_NAME_LEN))
658  			return t;
659  
660  	return NULL;
661  }
662  
get_governor(const char * str_governor)663  static struct cpufreq_governor *get_governor(const char *str_governor)
664  {
665  	struct cpufreq_governor *t;
666  
667  	mutex_lock(&cpufreq_governor_mutex);
668  	t = find_governor(str_governor);
669  	if (!t)
670  		goto unlock;
671  
672  	if (!try_module_get(t->owner))
673  		t = NULL;
674  
675  unlock:
676  	mutex_unlock(&cpufreq_governor_mutex);
677  
678  	return t;
679  }
680  
cpufreq_parse_policy(char * str_governor)681  static unsigned int cpufreq_parse_policy(char *str_governor)
682  {
683  	if (!strncasecmp(str_governor, "performance", CPUFREQ_NAME_LEN))
684  		return CPUFREQ_POLICY_PERFORMANCE;
685  
686  	if (!strncasecmp(str_governor, "powersave", CPUFREQ_NAME_LEN))
687  		return CPUFREQ_POLICY_POWERSAVE;
688  
689  	return CPUFREQ_POLICY_UNKNOWN;
690  }
691  
692  /**
693   * cpufreq_parse_governor - parse a governor string only for has_target()
694   * @str_governor: Governor name.
695   */
cpufreq_parse_governor(char * str_governor)696  static struct cpufreq_governor *cpufreq_parse_governor(char *str_governor)
697  {
698  	struct cpufreq_governor *t;
699  
700  	t = get_governor(str_governor);
701  	if (t)
702  		return t;
703  
704  	if (request_module("cpufreq_%s", str_governor))
705  		return NULL;
706  
707  	return get_governor(str_governor);
708  }
709  
710  /*
711   * cpufreq_per_cpu_attr_read() / show_##file_name() -
712   * print out cpufreq information
713   *
714   * Write out information from cpufreq_driver->policy[cpu]; object must be
715   * "unsigned int".
716   */
717  
718  #define show_one(file_name, object)			\
719  static ssize_t show_##file_name				\
720  (struct cpufreq_policy *policy, char *buf)		\
721  {							\
722  	return sysfs_emit(buf, "%u\n", policy->object);	\
723  }
724  
725  show_one(cpuinfo_min_freq, cpuinfo.min_freq);
726  show_one(cpuinfo_max_freq, cpuinfo.max_freq);
727  show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
728  show_one(scaling_min_freq, min);
729  show_one(scaling_max_freq, max);
730  
arch_freq_get_on_cpu(int cpu)731  __weak unsigned int arch_freq_get_on_cpu(int cpu)
732  {
733  	return 0;
734  }
735  
show_scaling_cur_freq(struct cpufreq_policy * policy,char * buf)736  static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
737  {
738  	ssize_t ret;
739  	unsigned int freq;
740  
741  	freq = arch_freq_get_on_cpu(policy->cpu);
742  	if (freq)
743  		ret = sysfs_emit(buf, "%u\n", freq);
744  	else if (cpufreq_driver->setpolicy && cpufreq_driver->get)
745  		ret = sysfs_emit(buf, "%u\n", cpufreq_driver->get(policy->cpu));
746  	else
747  		ret = sysfs_emit(buf, "%u\n", policy->cur);
748  	return ret;
749  }
750  
751  /*
752   * cpufreq_per_cpu_attr_write() / store_##file_name() - sysfs write access
753   */
754  #define store_one(file_name, object)			\
755  static ssize_t store_##file_name					\
756  (struct cpufreq_policy *policy, const char *buf, size_t count)		\
757  {									\
758  	unsigned long val;						\
759  	int ret;							\
760  									\
761  	ret = kstrtoul(buf, 0, &val);					\
762  	if (ret)							\
763  		return ret;						\
764  									\
765  	ret = freq_qos_update_request(policy->object##_freq_req, val);\
766  	return ret >= 0 ? count : ret;					\
767  }
768  
769  store_one(scaling_min_freq, min);
770  store_one(scaling_max_freq, max);
771  
772  /*
773   * show_cpuinfo_cur_freq - current CPU frequency as detected by hardware
774   */
show_cpuinfo_cur_freq(struct cpufreq_policy * policy,char * buf)775  static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
776  					char *buf)
777  {
778  	unsigned int cur_freq = __cpufreq_get(policy);
779  
780  	if (cur_freq)
781  		return sysfs_emit(buf, "%u\n", cur_freq);
782  
783  	return sysfs_emit(buf, "<unknown>\n");
784  }
785  
786  /*
787   * show_scaling_governor - show the current policy for the specified CPU
788   */
show_scaling_governor(struct cpufreq_policy * policy,char * buf)789  static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
790  {
791  	if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
792  		return sysfs_emit(buf, "powersave\n");
793  	else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
794  		return sysfs_emit(buf, "performance\n");
795  	else if (policy->governor)
796  		return sysfs_emit(buf, "%s\n", policy->governor->name);
797  	return -EINVAL;
798  }
799  
800  /*
801   * store_scaling_governor - store policy for the specified CPU
802   */
store_scaling_governor(struct cpufreq_policy * policy,const char * buf,size_t count)803  static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
804  					const char *buf, size_t count)
805  {
806  	char str_governor[16];
807  	int ret;
808  
809  	ret = sscanf(buf, "%15s", str_governor);
810  	if (ret != 1)
811  		return -EINVAL;
812  
813  	if (cpufreq_driver->setpolicy) {
814  		unsigned int new_pol;
815  
816  		new_pol = cpufreq_parse_policy(str_governor);
817  		if (!new_pol)
818  			return -EINVAL;
819  
820  		ret = cpufreq_set_policy(policy, NULL, new_pol);
821  	} else {
822  		struct cpufreq_governor *new_gov;
823  
824  		new_gov = cpufreq_parse_governor(str_governor);
825  		if (!new_gov)
826  			return -EINVAL;
827  
828  		ret = cpufreq_set_policy(policy, new_gov,
829  					 CPUFREQ_POLICY_UNKNOWN);
830  
831  		module_put(new_gov->owner);
832  	}
833  
834  	return ret ? ret : count;
835  }
836  
837  /*
838   * show_scaling_driver - show the cpufreq driver currently loaded
839   */
show_scaling_driver(struct cpufreq_policy * policy,char * buf)840  static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
841  {
842  	return scnprintf(buf, CPUFREQ_NAME_PLEN, "%s\n", cpufreq_driver->name);
843  }
844  
845  /*
846   * show_scaling_available_governors - show the available CPUfreq governors
847   */
show_scaling_available_governors(struct cpufreq_policy * policy,char * buf)848  static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
849  						char *buf)
850  {
851  	ssize_t i = 0;
852  	struct cpufreq_governor *t;
853  
854  	if (!has_target()) {
855  		i += sysfs_emit(buf, "performance powersave");
856  		goto out;
857  	}
858  
859  	mutex_lock(&cpufreq_governor_mutex);
860  	for_each_governor(t) {
861  		if (i >= (ssize_t) ((PAGE_SIZE / sizeof(char))
862  		    - (CPUFREQ_NAME_LEN + 2)))
863  			break;
864  		i += sysfs_emit_at(buf, i, "%s ", t->name);
865  	}
866  	mutex_unlock(&cpufreq_governor_mutex);
867  out:
868  	i += sysfs_emit_at(buf, i, "\n");
869  	return i;
870  }
871  
cpufreq_show_cpus(const struct cpumask * mask,char * buf)872  ssize_t cpufreq_show_cpus(const struct cpumask *mask, char *buf)
873  {
874  	ssize_t i = 0;
875  	unsigned int cpu;
876  
877  	for_each_cpu(cpu, mask) {
878  		i += sysfs_emit_at(buf, i, "%u ", cpu);
879  		if (i >= (PAGE_SIZE - 5))
880  			break;
881  	}
882  
883  	/* Remove the extra space at the end */
884  	i--;
885  
886  	i += sysfs_emit_at(buf, i, "\n");
887  	return i;
888  }
889  EXPORT_SYMBOL_GPL(cpufreq_show_cpus);
890  
891  /*
892   * show_related_cpus - show the CPUs affected by each transition even if
893   * hw coordination is in use
894   */
show_related_cpus(struct cpufreq_policy * policy,char * buf)895  static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
896  {
897  	return cpufreq_show_cpus(policy->related_cpus, buf);
898  }
899  
900  /*
901   * show_affected_cpus - show the CPUs affected by each transition
902   */
show_affected_cpus(struct cpufreq_policy * policy,char * buf)903  static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
904  {
905  	return cpufreq_show_cpus(policy->cpus, buf);
906  }
907  
store_scaling_setspeed(struct cpufreq_policy * policy,const char * buf,size_t count)908  static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
909  					const char *buf, size_t count)
910  {
911  	unsigned int freq = 0;
912  	unsigned int ret;
913  
914  	if (!policy->governor || !policy->governor->store_setspeed)
915  		return -EINVAL;
916  
917  	ret = sscanf(buf, "%u", &freq);
918  	if (ret != 1)
919  		return -EINVAL;
920  
921  	policy->governor->store_setspeed(policy, freq);
922  
923  	return count;
924  }
925  
show_scaling_setspeed(struct cpufreq_policy * policy,char * buf)926  static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
927  {
928  	if (!policy->governor || !policy->governor->show_setspeed)
929  		return sysfs_emit(buf, "<unsupported>\n");
930  
931  	return policy->governor->show_setspeed(policy, buf);
932  }
933  
934  /*
935   * show_bios_limit - show the current cpufreq HW/BIOS limitation
936   */
show_bios_limit(struct cpufreq_policy * policy,char * buf)937  static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
938  {
939  	unsigned int limit;
940  	int ret;
941  	ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
942  	if (!ret)
943  		return sysfs_emit(buf, "%u\n", limit);
944  	return sysfs_emit(buf, "%u\n", policy->cpuinfo.max_freq);
945  }
946  
947  cpufreq_freq_attr_ro_perm(cpuinfo_cur_freq, 0400);
948  cpufreq_freq_attr_ro(cpuinfo_min_freq);
949  cpufreq_freq_attr_ro(cpuinfo_max_freq);
950  cpufreq_freq_attr_ro(cpuinfo_transition_latency);
951  cpufreq_freq_attr_ro(scaling_available_governors);
952  cpufreq_freq_attr_ro(scaling_driver);
953  cpufreq_freq_attr_ro(scaling_cur_freq);
954  cpufreq_freq_attr_ro(bios_limit);
955  cpufreq_freq_attr_ro(related_cpus);
956  cpufreq_freq_attr_ro(affected_cpus);
957  cpufreq_freq_attr_rw(scaling_min_freq);
958  cpufreq_freq_attr_rw(scaling_max_freq);
959  cpufreq_freq_attr_rw(scaling_governor);
960  cpufreq_freq_attr_rw(scaling_setspeed);
961  
962  static struct attribute *cpufreq_attrs[] = {
963  	&cpuinfo_min_freq.attr,
964  	&cpuinfo_max_freq.attr,
965  	&cpuinfo_transition_latency.attr,
966  	&scaling_min_freq.attr,
967  	&scaling_max_freq.attr,
968  	&affected_cpus.attr,
969  	&related_cpus.attr,
970  	&scaling_governor.attr,
971  	&scaling_driver.attr,
972  	&scaling_available_governors.attr,
973  	&scaling_setspeed.attr,
974  	NULL
975  };
976  ATTRIBUTE_GROUPS(cpufreq);
977  
978  #define to_policy(k) container_of(k, struct cpufreq_policy, kobj)
979  #define to_attr(a) container_of(a, struct freq_attr, attr)
980  
show(struct kobject * kobj,struct attribute * attr,char * buf)981  static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
982  {
983  	struct cpufreq_policy *policy = to_policy(kobj);
984  	struct freq_attr *fattr = to_attr(attr);
985  	ssize_t ret = -EBUSY;
986  
987  	if (!fattr->show)
988  		return -EIO;
989  
990  	down_read(&policy->rwsem);
991  	if (likely(!policy_is_inactive(policy)))
992  		ret = fattr->show(policy, buf);
993  	up_read(&policy->rwsem);
994  
995  	return ret;
996  }
997  
store(struct kobject * kobj,struct attribute * attr,const char * buf,size_t count)998  static ssize_t store(struct kobject *kobj, struct attribute *attr,
999  		     const char *buf, size_t count)
1000  {
1001  	struct cpufreq_policy *policy = to_policy(kobj);
1002  	struct freq_attr *fattr = to_attr(attr);
1003  	ssize_t ret = -EBUSY;
1004  
1005  	if (!fattr->store)
1006  		return -EIO;
1007  
1008  	down_write(&policy->rwsem);
1009  	if (likely(!policy_is_inactive(policy)))
1010  		ret = fattr->store(policy, buf, count);
1011  	up_write(&policy->rwsem);
1012  
1013  	return ret;
1014  }
1015  
cpufreq_sysfs_release(struct kobject * kobj)1016  static void cpufreq_sysfs_release(struct kobject *kobj)
1017  {
1018  	struct cpufreq_policy *policy = to_policy(kobj);
1019  	pr_debug("last reference is dropped\n");
1020  	complete(&policy->kobj_unregister);
1021  }
1022  
1023  static const struct sysfs_ops sysfs_ops = {
1024  	.show	= show,
1025  	.store	= store,
1026  };
1027  
1028  static const struct kobj_type ktype_cpufreq = {
1029  	.sysfs_ops	= &sysfs_ops,
1030  	.default_groups	= cpufreq_groups,
1031  	.release	= cpufreq_sysfs_release,
1032  };
1033  
add_cpu_dev_symlink(struct cpufreq_policy * policy,unsigned int cpu,struct device * dev)1034  static void add_cpu_dev_symlink(struct cpufreq_policy *policy, unsigned int cpu,
1035  				struct device *dev)
1036  {
1037  	if (unlikely(!dev))
1038  		return;
1039  
1040  	if (cpumask_test_and_set_cpu(cpu, policy->real_cpus))
1041  		return;
1042  
1043  	dev_dbg(dev, "%s: Adding symlink\n", __func__);
1044  	if (sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq"))
1045  		dev_err(dev, "cpufreq symlink creation failed\n");
1046  }
1047  
remove_cpu_dev_symlink(struct cpufreq_policy * policy,int cpu,struct device * dev)1048  static void remove_cpu_dev_symlink(struct cpufreq_policy *policy, int cpu,
1049  				   struct device *dev)
1050  {
1051  	dev_dbg(dev, "%s: Removing symlink\n", __func__);
1052  	sysfs_remove_link(&dev->kobj, "cpufreq");
1053  	cpumask_clear_cpu(cpu, policy->real_cpus);
1054  }
1055  
cpufreq_add_dev_interface(struct cpufreq_policy * policy)1056  static int cpufreq_add_dev_interface(struct cpufreq_policy *policy)
1057  {
1058  	struct freq_attr **drv_attr;
1059  	int ret = 0;
1060  
1061  	/* set up files for this cpu device */
1062  	drv_attr = cpufreq_driver->attr;
1063  	while (drv_attr && *drv_attr) {
1064  		ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
1065  		if (ret)
1066  			return ret;
1067  		drv_attr++;
1068  	}
1069  	if (cpufreq_driver->get) {
1070  		ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
1071  		if (ret)
1072  			return ret;
1073  	}
1074  
1075  	ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
1076  	if (ret)
1077  		return ret;
1078  
1079  	if (cpufreq_driver->bios_limit) {
1080  		ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
1081  		if (ret)
1082  			return ret;
1083  	}
1084  
1085  	if (cpufreq_boost_supported()) {
1086  		ret = sysfs_create_file(&policy->kobj, &local_boost.attr);
1087  		if (ret)
1088  			return ret;
1089  	}
1090  
1091  	return 0;
1092  }
1093  
cpufreq_init_policy(struct cpufreq_policy * policy)1094  static int cpufreq_init_policy(struct cpufreq_policy *policy)
1095  {
1096  	struct cpufreq_governor *gov = NULL;
1097  	unsigned int pol = CPUFREQ_POLICY_UNKNOWN;
1098  	int ret;
1099  
1100  	if (has_target()) {
1101  		/* Update policy governor to the one used before hotplug. */
1102  		gov = get_governor(policy->last_governor);
1103  		if (gov) {
1104  			pr_debug("Restoring governor %s for cpu %d\n",
1105  				 gov->name, policy->cpu);
1106  		} else {
1107  			gov = get_governor(default_governor);
1108  		}
1109  
1110  		if (!gov) {
1111  			gov = cpufreq_default_governor();
1112  			__module_get(gov->owner);
1113  		}
1114  
1115  	} else {
1116  
1117  		/* Use the default policy if there is no last_policy. */
1118  		if (policy->last_policy) {
1119  			pol = policy->last_policy;
1120  		} else {
1121  			pol = cpufreq_parse_policy(default_governor);
1122  			/*
1123  			 * In case the default governor is neither "performance"
1124  			 * nor "powersave", fall back to the initial policy
1125  			 * value set by the driver.
1126  			 */
1127  			if (pol == CPUFREQ_POLICY_UNKNOWN)
1128  				pol = policy->policy;
1129  		}
1130  		if (pol != CPUFREQ_POLICY_PERFORMANCE &&
1131  		    pol != CPUFREQ_POLICY_POWERSAVE)
1132  			return -ENODATA;
1133  	}
1134  
1135  	ret = cpufreq_set_policy(policy, gov, pol);
1136  	if (gov)
1137  		module_put(gov->owner);
1138  
1139  	return ret;
1140  }
1141  
cpufreq_add_policy_cpu(struct cpufreq_policy * policy,unsigned int cpu)1142  static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu)
1143  {
1144  	int ret = 0;
1145  
1146  	/* Has this CPU been taken care of already? */
1147  	if (cpumask_test_cpu(cpu, policy->cpus))
1148  		return 0;
1149  
1150  	down_write(&policy->rwsem);
1151  	if (has_target())
1152  		cpufreq_stop_governor(policy);
1153  
1154  	cpumask_set_cpu(cpu, policy->cpus);
1155  
1156  	if (has_target()) {
1157  		ret = cpufreq_start_governor(policy);
1158  		if (ret)
1159  			pr_err("%s: Failed to start governor\n", __func__);
1160  	}
1161  	up_write(&policy->rwsem);
1162  	return ret;
1163  }
1164  
refresh_frequency_limits(struct cpufreq_policy * policy)1165  void refresh_frequency_limits(struct cpufreq_policy *policy)
1166  {
1167  	if (!policy_is_inactive(policy)) {
1168  		pr_debug("updating policy for CPU %u\n", policy->cpu);
1169  
1170  		cpufreq_set_policy(policy, policy->governor, policy->policy);
1171  	}
1172  }
1173  EXPORT_SYMBOL(refresh_frequency_limits);
1174  
handle_update(struct work_struct * work)1175  static void handle_update(struct work_struct *work)
1176  {
1177  	struct cpufreq_policy *policy =
1178  		container_of(work, struct cpufreq_policy, update);
1179  
1180  	pr_debug("handle_update for cpu %u called\n", policy->cpu);
1181  	down_write(&policy->rwsem);
1182  	refresh_frequency_limits(policy);
1183  	up_write(&policy->rwsem);
1184  }
1185  
cpufreq_notifier_min(struct notifier_block * nb,unsigned long freq,void * data)1186  static int cpufreq_notifier_min(struct notifier_block *nb, unsigned long freq,
1187  				void *data)
1188  {
1189  	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_min);
1190  
1191  	schedule_work(&policy->update);
1192  	return 0;
1193  }
1194  
cpufreq_notifier_max(struct notifier_block * nb,unsigned long freq,void * data)1195  static int cpufreq_notifier_max(struct notifier_block *nb, unsigned long freq,
1196  				void *data)
1197  {
1198  	struct cpufreq_policy *policy = container_of(nb, struct cpufreq_policy, nb_max);
1199  
1200  	schedule_work(&policy->update);
1201  	return 0;
1202  }
1203  
cpufreq_policy_put_kobj(struct cpufreq_policy * policy)1204  static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
1205  {
1206  	struct kobject *kobj;
1207  	struct completion *cmp;
1208  
1209  	down_write(&policy->rwsem);
1210  	cpufreq_stats_free_table(policy);
1211  	kobj = &policy->kobj;
1212  	cmp = &policy->kobj_unregister;
1213  	up_write(&policy->rwsem);
1214  	kobject_put(kobj);
1215  
1216  	/*
1217  	 * We need to make sure that the underlying kobj is
1218  	 * actually not referenced anymore by anybody before we
1219  	 * proceed with unloading.
1220  	 */
1221  	pr_debug("waiting for dropping of refcount\n");
1222  	wait_for_completion(cmp);
1223  	pr_debug("wait complete\n");
1224  }
1225  
cpufreq_policy_alloc(unsigned int cpu)1226  static struct cpufreq_policy *cpufreq_policy_alloc(unsigned int cpu)
1227  {
1228  	struct cpufreq_policy *policy;
1229  	struct device *dev = get_cpu_device(cpu);
1230  	int ret;
1231  
1232  	if (!dev)
1233  		return NULL;
1234  
1235  	policy = kzalloc(sizeof(*policy), GFP_KERNEL);
1236  	if (!policy)
1237  		return NULL;
1238  
1239  	if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
1240  		goto err_free_policy;
1241  
1242  	if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1243  		goto err_free_cpumask;
1244  
1245  	if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1246  		goto err_free_rcpumask;
1247  
1248  	init_completion(&policy->kobj_unregister);
1249  	ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
1250  				   cpufreq_global_kobject, "policy%u", cpu);
1251  	if (ret) {
1252  		dev_err(dev, "%s: failed to init policy->kobj: %d\n", __func__, ret);
1253  		/*
1254  		 * The entire policy object will be freed below, but the extra
1255  		 * memory allocated for the kobject name needs to be freed by
1256  		 * releasing the kobject.
1257  		 */
1258  		kobject_put(&policy->kobj);
1259  		goto err_free_real_cpus;
1260  	}
1261  
1262  	freq_constraints_init(&policy->constraints);
1263  
1264  	policy->nb_min.notifier_call = cpufreq_notifier_min;
1265  	policy->nb_max.notifier_call = cpufreq_notifier_max;
1266  
1267  	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
1268  				    &policy->nb_min);
1269  	if (ret) {
1270  		dev_err(dev, "Failed to register MIN QoS notifier: %d (CPU%u)\n",
1271  			ret, cpu);
1272  		goto err_kobj_remove;
1273  	}
1274  
1275  	ret = freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MAX,
1276  				    &policy->nb_max);
1277  	if (ret) {
1278  		dev_err(dev, "Failed to register MAX QoS notifier: %d (CPU%u)\n",
1279  			ret, cpu);
1280  		goto err_min_qos_notifier;
1281  	}
1282  
1283  	INIT_LIST_HEAD(&policy->policy_list);
1284  	init_rwsem(&policy->rwsem);
1285  	spin_lock_init(&policy->transition_lock);
1286  	init_waitqueue_head(&policy->transition_wait);
1287  	INIT_WORK(&policy->update, handle_update);
1288  
1289  	policy->cpu = cpu;
1290  	return policy;
1291  
1292  err_min_qos_notifier:
1293  	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1294  				 &policy->nb_min);
1295  err_kobj_remove:
1296  	cpufreq_policy_put_kobj(policy);
1297  err_free_real_cpus:
1298  	free_cpumask_var(policy->real_cpus);
1299  err_free_rcpumask:
1300  	free_cpumask_var(policy->related_cpus);
1301  err_free_cpumask:
1302  	free_cpumask_var(policy->cpus);
1303  err_free_policy:
1304  	kfree(policy);
1305  
1306  	return NULL;
1307  }
1308  
cpufreq_policy_free(struct cpufreq_policy * policy)1309  static void cpufreq_policy_free(struct cpufreq_policy *policy)
1310  {
1311  	unsigned long flags;
1312  	int cpu;
1313  
1314  	/*
1315  	 * The callers must ensure the policy is inactive by now, to avoid any
1316  	 * races with show()/store() callbacks.
1317  	 */
1318  	if (unlikely(!policy_is_inactive(policy)))
1319  		pr_warn("%s: Freeing active policy\n", __func__);
1320  
1321  	/* Remove policy from list */
1322  	write_lock_irqsave(&cpufreq_driver_lock, flags);
1323  	list_del(&policy->policy_list);
1324  
1325  	for_each_cpu(cpu, policy->related_cpus)
1326  		per_cpu(cpufreq_cpu_data, cpu) = NULL;
1327  	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1328  
1329  	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MAX,
1330  				 &policy->nb_max);
1331  	freq_qos_remove_notifier(&policy->constraints, FREQ_QOS_MIN,
1332  				 &policy->nb_min);
1333  
1334  	/* Cancel any pending policy->update work before freeing the policy. */
1335  	cancel_work_sync(&policy->update);
1336  
1337  	if (policy->max_freq_req) {
1338  		/*
1339  		 * Remove max_freq_req after sending CPUFREQ_REMOVE_POLICY
1340  		 * notification, since CPUFREQ_CREATE_POLICY notification was
1341  		 * sent after adding max_freq_req earlier.
1342  		 */
1343  		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1344  					     CPUFREQ_REMOVE_POLICY, policy);
1345  		freq_qos_remove_request(policy->max_freq_req);
1346  	}
1347  
1348  	freq_qos_remove_request(policy->min_freq_req);
1349  	kfree(policy->min_freq_req);
1350  
1351  	cpufreq_policy_put_kobj(policy);
1352  	free_cpumask_var(policy->real_cpus);
1353  	free_cpumask_var(policy->related_cpus);
1354  	free_cpumask_var(policy->cpus);
1355  	kfree(policy);
1356  }
1357  
cpufreq_online(unsigned int cpu)1358  static int cpufreq_online(unsigned int cpu)
1359  {
1360  	struct cpufreq_policy *policy;
1361  	bool new_policy;
1362  	unsigned long flags;
1363  	unsigned int j;
1364  	int ret;
1365  
1366  	pr_debug("%s: bringing CPU%u online\n", __func__, cpu);
1367  
1368  	/* Check if this CPU already has a policy to manage it */
1369  	policy = per_cpu(cpufreq_cpu_data, cpu);
1370  	if (policy) {
1371  		WARN_ON(!cpumask_test_cpu(cpu, policy->related_cpus));
1372  		if (!policy_is_inactive(policy))
1373  			return cpufreq_add_policy_cpu(policy, cpu);
1374  
1375  		/* This is the only online CPU for the policy.  Start over. */
1376  		new_policy = false;
1377  		down_write(&policy->rwsem);
1378  		policy->cpu = cpu;
1379  		policy->governor = NULL;
1380  	} else {
1381  		new_policy = true;
1382  		policy = cpufreq_policy_alloc(cpu);
1383  		if (!policy)
1384  			return -ENOMEM;
1385  		down_write(&policy->rwsem);
1386  	}
1387  
1388  	if (!new_policy && cpufreq_driver->online) {
1389  		/* Recover policy->cpus using related_cpus */
1390  		cpumask_copy(policy->cpus, policy->related_cpus);
1391  
1392  		ret = cpufreq_driver->online(policy);
1393  		if (ret) {
1394  			pr_debug("%s: %d: initialization failed\n", __func__,
1395  				 __LINE__);
1396  			goto out_exit_policy;
1397  		}
1398  	} else {
1399  		cpumask_copy(policy->cpus, cpumask_of(cpu));
1400  
1401  		/*
1402  		 * Call driver. From then on the cpufreq must be able
1403  		 * to accept all calls to ->verify and ->setpolicy for this CPU.
1404  		 */
1405  		ret = cpufreq_driver->init(policy);
1406  		if (ret) {
1407  			pr_debug("%s: %d: initialization failed\n", __func__,
1408  				 __LINE__);
1409  			goto out_free_policy;
1410  		}
1411  
1412  		/* Let the per-policy boost flag mirror the cpufreq_driver boost during init */
1413  		if (cpufreq_boost_enabled() && policy_has_boost_freq(policy))
1414  			policy->boost_enabled = true;
1415  
1416  		/*
1417  		 * The initialization has succeeded and the policy is online.
1418  		 * If there is a problem with its frequency table, take it
1419  		 * offline and drop it.
1420  		 */
1421  		ret = cpufreq_table_validate_and_sort(policy);
1422  		if (ret)
1423  			goto out_offline_policy;
1424  
1425  		/* related_cpus should at least include policy->cpus. */
1426  		cpumask_copy(policy->related_cpus, policy->cpus);
1427  	}
1428  
1429  	/*
1430  	 * affected cpus must always be the one, which are online. We aren't
1431  	 * managing offline cpus here.
1432  	 */
1433  	cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
1434  
1435  	if (new_policy) {
1436  		for_each_cpu(j, policy->related_cpus) {
1437  			per_cpu(cpufreq_cpu_data, j) = policy;
1438  			add_cpu_dev_symlink(policy, j, get_cpu_device(j));
1439  		}
1440  
1441  		policy->min_freq_req = kzalloc(2 * sizeof(*policy->min_freq_req),
1442  					       GFP_KERNEL);
1443  		if (!policy->min_freq_req) {
1444  			ret = -ENOMEM;
1445  			goto out_destroy_policy;
1446  		}
1447  
1448  		ret = freq_qos_add_request(&policy->constraints,
1449  					   policy->min_freq_req, FREQ_QOS_MIN,
1450  					   FREQ_QOS_MIN_DEFAULT_VALUE);
1451  		if (ret < 0) {
1452  			/*
1453  			 * So we don't call freq_qos_remove_request() for an
1454  			 * uninitialized request.
1455  			 */
1456  			kfree(policy->min_freq_req);
1457  			policy->min_freq_req = NULL;
1458  			goto out_destroy_policy;
1459  		}
1460  
1461  		/*
1462  		 * This must be initialized right here to avoid calling
1463  		 * freq_qos_remove_request() on uninitialized request in case
1464  		 * of errors.
1465  		 */
1466  		policy->max_freq_req = policy->min_freq_req + 1;
1467  
1468  		ret = freq_qos_add_request(&policy->constraints,
1469  					   policy->max_freq_req, FREQ_QOS_MAX,
1470  					   FREQ_QOS_MAX_DEFAULT_VALUE);
1471  		if (ret < 0) {
1472  			policy->max_freq_req = NULL;
1473  			goto out_destroy_policy;
1474  		}
1475  
1476  		blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
1477  				CPUFREQ_CREATE_POLICY, policy);
1478  	}
1479  
1480  	if (cpufreq_driver->get && has_target()) {
1481  		policy->cur = cpufreq_driver->get(policy->cpu);
1482  		if (!policy->cur) {
1483  			ret = -EIO;
1484  			pr_err("%s: ->get() failed\n", __func__);
1485  			goto out_destroy_policy;
1486  		}
1487  	}
1488  
1489  	/*
1490  	 * Sometimes boot loaders set CPU frequency to a value outside of
1491  	 * frequency table present with cpufreq core. In such cases CPU might be
1492  	 * unstable if it has to run on that frequency for long duration of time
1493  	 * and so its better to set it to a frequency which is specified in
1494  	 * freq-table. This also makes cpufreq stats inconsistent as
1495  	 * cpufreq-stats would fail to register because current frequency of CPU
1496  	 * isn't found in freq-table.
1497  	 *
1498  	 * Because we don't want this change to effect boot process badly, we go
1499  	 * for the next freq which is >= policy->cur ('cur' must be set by now,
1500  	 * otherwise we will end up setting freq to lowest of the table as 'cur'
1501  	 * is initialized to zero).
1502  	 *
1503  	 * We are passing target-freq as "policy->cur - 1" otherwise
1504  	 * __cpufreq_driver_target() would simply fail, as policy->cur will be
1505  	 * equal to target-freq.
1506  	 */
1507  	if ((cpufreq_driver->flags & CPUFREQ_NEED_INITIAL_FREQ_CHECK)
1508  	    && has_target()) {
1509  		unsigned int old_freq = policy->cur;
1510  
1511  		/* Are we running at unknown frequency ? */
1512  		ret = cpufreq_frequency_table_get_index(policy, old_freq);
1513  		if (ret == -EINVAL) {
1514  			ret = __cpufreq_driver_target(policy, old_freq - 1,
1515  						      CPUFREQ_RELATION_L);
1516  
1517  			/*
1518  			 * Reaching here after boot in a few seconds may not
1519  			 * mean that system will remain stable at "unknown"
1520  			 * frequency for longer duration. Hence, a BUG_ON().
1521  			 */
1522  			BUG_ON(ret);
1523  			pr_info("%s: CPU%d: Running at unlisted initial frequency: %u KHz, changing to: %u KHz\n",
1524  				__func__, policy->cpu, old_freq, policy->cur);
1525  		}
1526  	}
1527  
1528  	if (new_policy) {
1529  		ret = cpufreq_add_dev_interface(policy);
1530  		if (ret)
1531  			goto out_destroy_policy;
1532  
1533  		cpufreq_stats_create_table(policy);
1534  
1535  		write_lock_irqsave(&cpufreq_driver_lock, flags);
1536  		list_add(&policy->policy_list, &cpufreq_policy_list);
1537  		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1538  
1539  		/*
1540  		 * Register with the energy model before
1541  		 * sugov_eas_rebuild_sd() is called, which will result
1542  		 * in rebuilding of the sched domains, which should only be done
1543  		 * once the energy model is properly initialized for the policy
1544  		 * first.
1545  		 *
1546  		 * Also, this should be called before the policy is registered
1547  		 * with cooling framework.
1548  		 */
1549  		if (cpufreq_driver->register_em)
1550  			cpufreq_driver->register_em(policy);
1551  	}
1552  
1553  	ret = cpufreq_init_policy(policy);
1554  	if (ret) {
1555  		pr_err("%s: Failed to initialize policy for cpu: %d (%d)\n",
1556  		       __func__, cpu, ret);
1557  		goto out_destroy_policy;
1558  	}
1559  
1560  	up_write(&policy->rwsem);
1561  
1562  	kobject_uevent(&policy->kobj, KOBJ_ADD);
1563  
1564  	/* Callback for handling stuff after policy is ready */
1565  	if (cpufreq_driver->ready)
1566  		cpufreq_driver->ready(policy);
1567  
1568  	/* Register cpufreq cooling only for a new policy */
1569  	if (new_policy && cpufreq_thermal_control_enabled(cpufreq_driver))
1570  		policy->cdev = of_cpufreq_cooling_register(policy);
1571  
1572  	pr_debug("initialization complete\n");
1573  
1574  	return 0;
1575  
1576  out_destroy_policy:
1577  	for_each_cpu(j, policy->real_cpus)
1578  		remove_cpu_dev_symlink(policy, j, get_cpu_device(j));
1579  
1580  out_offline_policy:
1581  	if (cpufreq_driver->offline)
1582  		cpufreq_driver->offline(policy);
1583  
1584  out_exit_policy:
1585  	if (cpufreq_driver->exit)
1586  		cpufreq_driver->exit(policy);
1587  
1588  out_free_policy:
1589  	cpumask_clear(policy->cpus);
1590  	up_write(&policy->rwsem);
1591  
1592  	cpufreq_policy_free(policy);
1593  	return ret;
1594  }
1595  
1596  /**
1597   * cpufreq_add_dev - the cpufreq interface for a CPU device.
1598   * @dev: CPU device.
1599   * @sif: Subsystem interface structure pointer (not used)
1600   */
cpufreq_add_dev(struct device * dev,struct subsys_interface * sif)1601  static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1602  {
1603  	struct cpufreq_policy *policy;
1604  	unsigned cpu = dev->id;
1605  	int ret;
1606  
1607  	dev_dbg(dev, "%s: adding CPU%u\n", __func__, cpu);
1608  
1609  	if (cpu_online(cpu)) {
1610  		ret = cpufreq_online(cpu);
1611  		if (ret)
1612  			return ret;
1613  	}
1614  
1615  	/* Create sysfs link on CPU registration */
1616  	policy = per_cpu(cpufreq_cpu_data, cpu);
1617  	if (policy)
1618  		add_cpu_dev_symlink(policy, cpu, dev);
1619  
1620  	return 0;
1621  }
1622  
__cpufreq_offline(unsigned int cpu,struct cpufreq_policy * policy)1623  static void __cpufreq_offline(unsigned int cpu, struct cpufreq_policy *policy)
1624  {
1625  	int ret;
1626  
1627  	if (has_target())
1628  		cpufreq_stop_governor(policy);
1629  
1630  	cpumask_clear_cpu(cpu, policy->cpus);
1631  
1632  	if (!policy_is_inactive(policy)) {
1633  		/* Nominate a new CPU if necessary. */
1634  		if (cpu == policy->cpu)
1635  			policy->cpu = cpumask_any(policy->cpus);
1636  
1637  		/* Start the governor again for the active policy. */
1638  		if (has_target()) {
1639  			ret = cpufreq_start_governor(policy);
1640  			if (ret)
1641  				pr_err("%s: Failed to start governor\n", __func__);
1642  		}
1643  
1644  		return;
1645  	}
1646  
1647  	if (has_target())
1648  		strscpy(policy->last_governor, policy->governor->name,
1649  			CPUFREQ_NAME_LEN);
1650  	else
1651  		policy->last_policy = policy->policy;
1652  
1653  	if (has_target())
1654  		cpufreq_exit_governor(policy);
1655  
1656  	/*
1657  	 * Perform the ->offline() during light-weight tear-down, as
1658  	 * that allows fast recovery when the CPU comes back.
1659  	 */
1660  	if (cpufreq_driver->offline) {
1661  		cpufreq_driver->offline(policy);
1662  		return;
1663  	}
1664  
1665  	if (cpufreq_driver->exit)
1666  		cpufreq_driver->exit(policy);
1667  
1668  	policy->freq_table = NULL;
1669  }
1670  
cpufreq_offline(unsigned int cpu)1671  static int cpufreq_offline(unsigned int cpu)
1672  {
1673  	struct cpufreq_policy *policy;
1674  
1675  	pr_debug("%s: unregistering CPU %u\n", __func__, cpu);
1676  
1677  	policy = cpufreq_cpu_get_raw(cpu);
1678  	if (!policy) {
1679  		pr_debug("%s: No cpu_data found\n", __func__);
1680  		return 0;
1681  	}
1682  
1683  	down_write(&policy->rwsem);
1684  
1685  	__cpufreq_offline(cpu, policy);
1686  
1687  	up_write(&policy->rwsem);
1688  	return 0;
1689  }
1690  
1691  /*
1692   * cpufreq_remove_dev - remove a CPU device
1693   *
1694   * Removes the cpufreq interface for a CPU device.
1695   */
cpufreq_remove_dev(struct device * dev,struct subsys_interface * sif)1696  static void cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1697  {
1698  	unsigned int cpu = dev->id;
1699  	struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1700  
1701  	if (!policy)
1702  		return;
1703  
1704  	down_write(&policy->rwsem);
1705  
1706  	if (cpu_online(cpu))
1707  		__cpufreq_offline(cpu, policy);
1708  
1709  	remove_cpu_dev_symlink(policy, cpu, dev);
1710  
1711  	if (!cpumask_empty(policy->real_cpus)) {
1712  		up_write(&policy->rwsem);
1713  		return;
1714  	}
1715  
1716  	/*
1717  	 * Unregister cpufreq cooling once all the CPUs of the policy are
1718  	 * removed.
1719  	 */
1720  	if (cpufreq_thermal_control_enabled(cpufreq_driver)) {
1721  		cpufreq_cooling_unregister(policy->cdev);
1722  		policy->cdev = NULL;
1723  	}
1724  
1725  	/* We did light-weight exit earlier, do full tear down now */
1726  	if (cpufreq_driver->offline && cpufreq_driver->exit)
1727  		cpufreq_driver->exit(policy);
1728  
1729  	up_write(&policy->rwsem);
1730  
1731  	cpufreq_policy_free(policy);
1732  }
1733  
1734  /**
1735   * cpufreq_out_of_sync - Fix up actual and saved CPU frequency difference.
1736   * @policy: Policy managing CPUs.
1737   * @new_freq: New CPU frequency.
1738   *
1739   * Adjust to the current frequency first and clean up later by either calling
1740   * cpufreq_update_policy(), or scheduling handle_update().
1741   */
cpufreq_out_of_sync(struct cpufreq_policy * policy,unsigned int new_freq)1742  static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
1743  				unsigned int new_freq)
1744  {
1745  	struct cpufreq_freqs freqs;
1746  
1747  	pr_debug("Warning: CPU frequency out of sync: cpufreq and timing core thinks of %u, is %u kHz\n",
1748  		 policy->cur, new_freq);
1749  
1750  	freqs.old = policy->cur;
1751  	freqs.new = new_freq;
1752  
1753  	cpufreq_freq_transition_begin(policy, &freqs);
1754  	cpufreq_freq_transition_end(policy, &freqs, 0);
1755  }
1756  
cpufreq_verify_current_freq(struct cpufreq_policy * policy,bool update)1757  static unsigned int cpufreq_verify_current_freq(struct cpufreq_policy *policy, bool update)
1758  {
1759  	unsigned int new_freq;
1760  
1761  	new_freq = cpufreq_driver->get(policy->cpu);
1762  	if (!new_freq)
1763  		return 0;
1764  
1765  	/*
1766  	 * If fast frequency switching is used with the given policy, the check
1767  	 * against policy->cur is pointless, so skip it in that case.
1768  	 */
1769  	if (policy->fast_switch_enabled || !has_target())
1770  		return new_freq;
1771  
1772  	if (policy->cur != new_freq) {
1773  		/*
1774  		 * For some platforms, the frequency returned by hardware may be
1775  		 * slightly different from what is provided in the frequency
1776  		 * table, for example hardware may return 499 MHz instead of 500
1777  		 * MHz. In such cases it is better to avoid getting into
1778  		 * unnecessary frequency updates.
1779  		 */
1780  		if (abs(policy->cur - new_freq) < KHZ_PER_MHZ)
1781  			return policy->cur;
1782  
1783  		cpufreq_out_of_sync(policy, new_freq);
1784  		if (update)
1785  			schedule_work(&policy->update);
1786  	}
1787  
1788  	return new_freq;
1789  }
1790  
1791  /**
1792   * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
1793   * @cpu: CPU number
1794   *
1795   * This is the last known freq, without actually getting it from the driver.
1796   * Return value will be same as what is shown in scaling_cur_freq in sysfs.
1797   */
cpufreq_quick_get(unsigned int cpu)1798  unsigned int cpufreq_quick_get(unsigned int cpu)
1799  {
1800  	struct cpufreq_policy *policy;
1801  	unsigned int ret_freq = 0;
1802  	unsigned long flags;
1803  
1804  	read_lock_irqsave(&cpufreq_driver_lock, flags);
1805  
1806  	if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get) {
1807  		ret_freq = cpufreq_driver->get(cpu);
1808  		read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1809  		return ret_freq;
1810  	}
1811  
1812  	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1813  
1814  	policy = cpufreq_cpu_get(cpu);
1815  	if (policy) {
1816  		ret_freq = policy->cur;
1817  		cpufreq_cpu_put(policy);
1818  	}
1819  
1820  	return ret_freq;
1821  }
1822  EXPORT_SYMBOL(cpufreq_quick_get);
1823  
1824  /**
1825   * cpufreq_quick_get_max - get the max reported CPU frequency for this CPU
1826   * @cpu: CPU number
1827   *
1828   * Just return the max possible frequency for a given CPU.
1829   */
cpufreq_quick_get_max(unsigned int cpu)1830  unsigned int cpufreq_quick_get_max(unsigned int cpu)
1831  {
1832  	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1833  	unsigned int ret_freq = 0;
1834  
1835  	if (policy) {
1836  		ret_freq = policy->max;
1837  		cpufreq_cpu_put(policy);
1838  	}
1839  
1840  	return ret_freq;
1841  }
1842  EXPORT_SYMBOL(cpufreq_quick_get_max);
1843  
1844  /**
1845   * cpufreq_get_hw_max_freq - get the max hardware frequency of the CPU
1846   * @cpu: CPU number
1847   *
1848   * The default return value is the max_freq field of cpuinfo.
1849   */
cpufreq_get_hw_max_freq(unsigned int cpu)1850  __weak unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
1851  {
1852  	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1853  	unsigned int ret_freq = 0;
1854  
1855  	if (policy) {
1856  		ret_freq = policy->cpuinfo.max_freq;
1857  		cpufreq_cpu_put(policy);
1858  	}
1859  
1860  	return ret_freq;
1861  }
1862  EXPORT_SYMBOL(cpufreq_get_hw_max_freq);
1863  
__cpufreq_get(struct cpufreq_policy * policy)1864  static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
1865  {
1866  	if (unlikely(policy_is_inactive(policy)))
1867  		return 0;
1868  
1869  	return cpufreq_verify_current_freq(policy, true);
1870  }
1871  
1872  /**
1873   * cpufreq_get - get the current CPU frequency (in kHz)
1874   * @cpu: CPU number
1875   *
1876   * Get the CPU current (static) CPU frequency
1877   */
cpufreq_get(unsigned int cpu)1878  unsigned int cpufreq_get(unsigned int cpu)
1879  {
1880  	struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
1881  	unsigned int ret_freq = 0;
1882  
1883  	if (policy) {
1884  		down_read(&policy->rwsem);
1885  		if (cpufreq_driver->get)
1886  			ret_freq = __cpufreq_get(policy);
1887  		up_read(&policy->rwsem);
1888  
1889  		cpufreq_cpu_put(policy);
1890  	}
1891  
1892  	return ret_freq;
1893  }
1894  EXPORT_SYMBOL(cpufreq_get);
1895  
1896  static struct subsys_interface cpufreq_interface = {
1897  	.name		= "cpufreq",
1898  	.subsys		= &cpu_subsys,
1899  	.add_dev	= cpufreq_add_dev,
1900  	.remove_dev	= cpufreq_remove_dev,
1901  };
1902  
1903  /*
1904   * In case platform wants some specific frequency to be configured
1905   * during suspend..
1906   */
cpufreq_generic_suspend(struct cpufreq_policy * policy)1907  int cpufreq_generic_suspend(struct cpufreq_policy *policy)
1908  {
1909  	int ret;
1910  
1911  	if (!policy->suspend_freq) {
1912  		pr_debug("%s: suspend_freq not defined\n", __func__);
1913  		return 0;
1914  	}
1915  
1916  	pr_debug("%s: Setting suspend-freq: %u\n", __func__,
1917  			policy->suspend_freq);
1918  
1919  	ret = __cpufreq_driver_target(policy, policy->suspend_freq,
1920  			CPUFREQ_RELATION_H);
1921  	if (ret)
1922  		pr_err("%s: unable to set suspend-freq: %u. err: %d\n",
1923  				__func__, policy->suspend_freq, ret);
1924  
1925  	return ret;
1926  }
1927  EXPORT_SYMBOL(cpufreq_generic_suspend);
1928  
1929  /**
1930   * cpufreq_suspend() - Suspend CPUFreq governors.
1931   *
1932   * Called during system wide Suspend/Hibernate cycles for suspending governors
1933   * as some platforms can't change frequency after this point in suspend cycle.
1934   * Because some of the devices (like: i2c, regulators, etc) they use for
1935   * changing frequency are suspended quickly after this point.
1936   */
cpufreq_suspend(void)1937  void cpufreq_suspend(void)
1938  {
1939  	struct cpufreq_policy *policy;
1940  
1941  	if (!cpufreq_driver)
1942  		return;
1943  
1944  	if (!has_target() && !cpufreq_driver->suspend)
1945  		goto suspend;
1946  
1947  	pr_debug("%s: Suspending Governors\n", __func__);
1948  
1949  	for_each_active_policy(policy) {
1950  		if (has_target()) {
1951  			down_write(&policy->rwsem);
1952  			cpufreq_stop_governor(policy);
1953  			up_write(&policy->rwsem);
1954  		}
1955  
1956  		if (cpufreq_driver->suspend && cpufreq_driver->suspend(policy))
1957  			pr_err("%s: Failed to suspend driver: %s\n", __func__,
1958  				cpufreq_driver->name);
1959  	}
1960  
1961  suspend:
1962  	cpufreq_suspended = true;
1963  }
1964  
1965  /**
1966   * cpufreq_resume() - Resume CPUFreq governors.
1967   *
1968   * Called during system wide Suspend/Hibernate cycle for resuming governors that
1969   * are suspended with cpufreq_suspend().
1970   */
cpufreq_resume(void)1971  void cpufreq_resume(void)
1972  {
1973  	struct cpufreq_policy *policy;
1974  	int ret;
1975  
1976  	if (!cpufreq_driver)
1977  		return;
1978  
1979  	if (unlikely(!cpufreq_suspended))
1980  		return;
1981  
1982  	cpufreq_suspended = false;
1983  
1984  	if (!has_target() && !cpufreq_driver->resume)
1985  		return;
1986  
1987  	pr_debug("%s: Resuming Governors\n", __func__);
1988  
1989  	for_each_active_policy(policy) {
1990  		if (cpufreq_driver->resume && cpufreq_driver->resume(policy)) {
1991  			pr_err("%s: Failed to resume driver: %s\n", __func__,
1992  				cpufreq_driver->name);
1993  		} else if (has_target()) {
1994  			down_write(&policy->rwsem);
1995  			ret = cpufreq_start_governor(policy);
1996  			up_write(&policy->rwsem);
1997  
1998  			if (ret)
1999  				pr_err("%s: Failed to start governor for CPU%u's policy\n",
2000  				       __func__, policy->cpu);
2001  		}
2002  	}
2003  }
2004  
2005  /**
2006   * cpufreq_driver_test_flags - Test cpufreq driver's flags against given ones.
2007   * @flags: Flags to test against the current cpufreq driver's flags.
2008   *
2009   * Assumes that the driver is there, so callers must ensure that this is the
2010   * case.
2011   */
cpufreq_driver_test_flags(u16 flags)2012  bool cpufreq_driver_test_flags(u16 flags)
2013  {
2014  	return !!(cpufreq_driver->flags & flags);
2015  }
2016  
2017  /**
2018   * cpufreq_get_current_driver - Return the current driver's name.
2019   *
2020   * Return the name string of the currently registered cpufreq driver or NULL if
2021   * none.
2022   */
cpufreq_get_current_driver(void)2023  const char *cpufreq_get_current_driver(void)
2024  {
2025  	if (cpufreq_driver)
2026  		return cpufreq_driver->name;
2027  
2028  	return NULL;
2029  }
2030  EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
2031  
2032  /**
2033   * cpufreq_get_driver_data - Return current driver data.
2034   *
2035   * Return the private data of the currently registered cpufreq driver, or NULL
2036   * if no cpufreq driver has been registered.
2037   */
cpufreq_get_driver_data(void)2038  void *cpufreq_get_driver_data(void)
2039  {
2040  	if (cpufreq_driver)
2041  		return cpufreq_driver->driver_data;
2042  
2043  	return NULL;
2044  }
2045  EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
2046  
2047  /*********************************************************************
2048   *                     NOTIFIER LISTS INTERFACE                      *
2049   *********************************************************************/
2050  
2051  /**
2052   * cpufreq_register_notifier - Register a notifier with cpufreq.
2053   * @nb: notifier function to register.
2054   * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2055   *
2056   * Add a notifier to one of two lists: either a list of notifiers that run on
2057   * clock rate changes (once before and once after every transition), or a list
2058   * of notifiers that ron on cpufreq policy changes.
2059   *
2060   * This function may sleep and it has the same return values as
2061   * blocking_notifier_chain_register().
2062   */
cpufreq_register_notifier(struct notifier_block * nb,unsigned int list)2063  int cpufreq_register_notifier(struct notifier_block *nb, unsigned int list)
2064  {
2065  	int ret;
2066  
2067  	if (cpufreq_disabled())
2068  		return -EINVAL;
2069  
2070  	switch (list) {
2071  	case CPUFREQ_TRANSITION_NOTIFIER:
2072  		mutex_lock(&cpufreq_fast_switch_lock);
2073  
2074  		if (cpufreq_fast_switch_count > 0) {
2075  			mutex_unlock(&cpufreq_fast_switch_lock);
2076  			return -EBUSY;
2077  		}
2078  		ret = srcu_notifier_chain_register(
2079  				&cpufreq_transition_notifier_list, nb);
2080  		if (!ret)
2081  			cpufreq_fast_switch_count--;
2082  
2083  		mutex_unlock(&cpufreq_fast_switch_lock);
2084  		break;
2085  	case CPUFREQ_POLICY_NOTIFIER:
2086  		ret = blocking_notifier_chain_register(
2087  				&cpufreq_policy_notifier_list, nb);
2088  		break;
2089  	default:
2090  		ret = -EINVAL;
2091  	}
2092  
2093  	return ret;
2094  }
2095  EXPORT_SYMBOL(cpufreq_register_notifier);
2096  
2097  /**
2098   * cpufreq_unregister_notifier - Unregister a notifier from cpufreq.
2099   * @nb: notifier block to be unregistered.
2100   * @list: CPUFREQ_TRANSITION_NOTIFIER or CPUFREQ_POLICY_NOTIFIER.
2101   *
2102   * Remove a notifier from one of the cpufreq notifier lists.
2103   *
2104   * This function may sleep and it has the same return values as
2105   * blocking_notifier_chain_unregister().
2106   */
cpufreq_unregister_notifier(struct notifier_block * nb,unsigned int list)2107  int cpufreq_unregister_notifier(struct notifier_block *nb, unsigned int list)
2108  {
2109  	int ret;
2110  
2111  	if (cpufreq_disabled())
2112  		return -EINVAL;
2113  
2114  	switch (list) {
2115  	case CPUFREQ_TRANSITION_NOTIFIER:
2116  		mutex_lock(&cpufreq_fast_switch_lock);
2117  
2118  		ret = srcu_notifier_chain_unregister(
2119  				&cpufreq_transition_notifier_list, nb);
2120  		if (!ret && !WARN_ON(cpufreq_fast_switch_count >= 0))
2121  			cpufreq_fast_switch_count++;
2122  
2123  		mutex_unlock(&cpufreq_fast_switch_lock);
2124  		break;
2125  	case CPUFREQ_POLICY_NOTIFIER:
2126  		ret = blocking_notifier_chain_unregister(
2127  				&cpufreq_policy_notifier_list, nb);
2128  		break;
2129  	default:
2130  		ret = -EINVAL;
2131  	}
2132  
2133  	return ret;
2134  }
2135  EXPORT_SYMBOL(cpufreq_unregister_notifier);
2136  
2137  
2138  /*********************************************************************
2139   *                              GOVERNORS                            *
2140   *********************************************************************/
2141  
2142  /**
2143   * cpufreq_driver_fast_switch - Carry out a fast CPU frequency switch.
2144   * @policy: cpufreq policy to switch the frequency for.
2145   * @target_freq: New frequency to set (may be approximate).
2146   *
2147   * Carry out a fast frequency switch without sleeping.
2148   *
2149   * The driver's ->fast_switch() callback invoked by this function must be
2150   * suitable for being called from within RCU-sched read-side critical sections
2151   * and it is expected to select the minimum available frequency greater than or
2152   * equal to @target_freq (CPUFREQ_RELATION_L).
2153   *
2154   * This function must not be called if policy->fast_switch_enabled is unset.
2155   *
2156   * Governors calling this function must guarantee that it will never be invoked
2157   * twice in parallel for the same policy and that it will never be called in
2158   * parallel with either ->target() or ->target_index() for the same policy.
2159   *
2160   * Returns the actual frequency set for the CPU.
2161   *
2162   * If 0 is returned by the driver's ->fast_switch() callback to indicate an
2163   * error condition, the hardware configuration must be preserved.
2164   */
cpufreq_driver_fast_switch(struct cpufreq_policy * policy,unsigned int target_freq)2165  unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
2166  					unsigned int target_freq)
2167  {
2168  	unsigned int freq;
2169  	int cpu;
2170  
2171  	target_freq = clamp_val(target_freq, policy->min, policy->max);
2172  	freq = cpufreq_driver->fast_switch(policy, target_freq);
2173  
2174  	if (!freq)
2175  		return 0;
2176  
2177  	policy->cur = freq;
2178  	arch_set_freq_scale(policy->related_cpus, freq,
2179  			    arch_scale_freq_ref(policy->cpu));
2180  	cpufreq_stats_record_transition(policy, freq);
2181  
2182  	if (trace_cpu_frequency_enabled()) {
2183  		for_each_cpu(cpu, policy->cpus)
2184  			trace_cpu_frequency(freq, cpu);
2185  	}
2186  
2187  	return freq;
2188  }
2189  EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
2190  
2191  /**
2192   * cpufreq_driver_adjust_perf - Adjust CPU performance level in one go.
2193   * @cpu: Target CPU.
2194   * @min_perf: Minimum (required) performance level (units of @capacity).
2195   * @target_perf: Target (desired) performance level (units of @capacity).
2196   * @capacity: Capacity of the target CPU.
2197   *
2198   * Carry out a fast performance level switch of @cpu without sleeping.
2199   *
2200   * The driver's ->adjust_perf() callback invoked by this function must be
2201   * suitable for being called from within RCU-sched read-side critical sections
2202   * and it is expected to select a suitable performance level equal to or above
2203   * @min_perf and preferably equal to or below @target_perf.
2204   *
2205   * This function must not be called if policy->fast_switch_enabled is unset.
2206   *
2207   * Governors calling this function must guarantee that it will never be invoked
2208   * twice in parallel for the same CPU and that it will never be called in
2209   * parallel with either ->target() or ->target_index() or ->fast_switch() for
2210   * the same CPU.
2211   */
cpufreq_driver_adjust_perf(unsigned int cpu,unsigned long min_perf,unsigned long target_perf,unsigned long capacity)2212  void cpufreq_driver_adjust_perf(unsigned int cpu,
2213  				 unsigned long min_perf,
2214  				 unsigned long target_perf,
2215  				 unsigned long capacity)
2216  {
2217  	cpufreq_driver->adjust_perf(cpu, min_perf, target_perf, capacity);
2218  }
2219  
2220  /**
2221   * cpufreq_driver_has_adjust_perf - Check "direct fast switch" callback.
2222   *
2223   * Return 'true' if the ->adjust_perf callback is present for the
2224   * current driver or 'false' otherwise.
2225   */
cpufreq_driver_has_adjust_perf(void)2226  bool cpufreq_driver_has_adjust_perf(void)
2227  {
2228  	return !!cpufreq_driver->adjust_perf;
2229  }
2230  
2231  /* Must set freqs->new to intermediate frequency */
__target_intermediate(struct cpufreq_policy * policy,struct cpufreq_freqs * freqs,int index)2232  static int __target_intermediate(struct cpufreq_policy *policy,
2233  				 struct cpufreq_freqs *freqs, int index)
2234  {
2235  	int ret;
2236  
2237  	freqs->new = cpufreq_driver->get_intermediate(policy, index);
2238  
2239  	/* We don't need to switch to intermediate freq */
2240  	if (!freqs->new)
2241  		return 0;
2242  
2243  	pr_debug("%s: cpu: %d, switching to intermediate freq: oldfreq: %u, intermediate freq: %u\n",
2244  		 __func__, policy->cpu, freqs->old, freqs->new);
2245  
2246  	cpufreq_freq_transition_begin(policy, freqs);
2247  	ret = cpufreq_driver->target_intermediate(policy, index);
2248  	cpufreq_freq_transition_end(policy, freqs, ret);
2249  
2250  	if (ret)
2251  		pr_err("%s: Failed to change to intermediate frequency: %d\n",
2252  		       __func__, ret);
2253  
2254  	return ret;
2255  }
2256  
__target_index(struct cpufreq_policy * policy,int index)2257  static int __target_index(struct cpufreq_policy *policy, int index)
2258  {
2259  	struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
2260  	unsigned int restore_freq, intermediate_freq = 0;
2261  	unsigned int newfreq = policy->freq_table[index].frequency;
2262  	int retval = -EINVAL;
2263  	bool notify;
2264  
2265  	if (newfreq == policy->cur)
2266  		return 0;
2267  
2268  	/* Save last value to restore later on errors */
2269  	restore_freq = policy->cur;
2270  
2271  	notify = !(cpufreq_driver->flags & CPUFREQ_ASYNC_NOTIFICATION);
2272  	if (notify) {
2273  		/* Handle switching to intermediate frequency */
2274  		if (cpufreq_driver->get_intermediate) {
2275  			retval = __target_intermediate(policy, &freqs, index);
2276  			if (retval)
2277  				return retval;
2278  
2279  			intermediate_freq = freqs.new;
2280  			/* Set old freq to intermediate */
2281  			if (intermediate_freq)
2282  				freqs.old = freqs.new;
2283  		}
2284  
2285  		freqs.new = newfreq;
2286  		pr_debug("%s: cpu: %d, oldfreq: %u, new freq: %u\n",
2287  			 __func__, policy->cpu, freqs.old, freqs.new);
2288  
2289  		cpufreq_freq_transition_begin(policy, &freqs);
2290  	}
2291  
2292  	retval = cpufreq_driver->target_index(policy, index);
2293  	if (retval)
2294  		pr_err("%s: Failed to change cpu frequency: %d\n", __func__,
2295  		       retval);
2296  
2297  	if (notify) {
2298  		cpufreq_freq_transition_end(policy, &freqs, retval);
2299  
2300  		/*
2301  		 * Failed after setting to intermediate freq? Driver should have
2302  		 * reverted back to initial frequency and so should we. Check
2303  		 * here for intermediate_freq instead of get_intermediate, in
2304  		 * case we haven't switched to intermediate freq at all.
2305  		 */
2306  		if (unlikely(retval && intermediate_freq)) {
2307  			freqs.old = intermediate_freq;
2308  			freqs.new = restore_freq;
2309  			cpufreq_freq_transition_begin(policy, &freqs);
2310  			cpufreq_freq_transition_end(policy, &freqs, 0);
2311  		}
2312  	}
2313  
2314  	return retval;
2315  }
2316  
__cpufreq_driver_target(struct cpufreq_policy * policy,unsigned int target_freq,unsigned int relation)2317  int __cpufreq_driver_target(struct cpufreq_policy *policy,
2318  			    unsigned int target_freq,
2319  			    unsigned int relation)
2320  {
2321  	unsigned int old_target_freq = target_freq;
2322  
2323  	if (cpufreq_disabled())
2324  		return -ENODEV;
2325  
2326  	target_freq = __resolve_freq(policy, target_freq, relation);
2327  
2328  	pr_debug("target for CPU %u: %u kHz, relation %u, requested %u kHz\n",
2329  		 policy->cpu, target_freq, relation, old_target_freq);
2330  
2331  	/*
2332  	 * This might look like a redundant call as we are checking it again
2333  	 * after finding index. But it is left intentionally for cases where
2334  	 * exactly same freq is called again and so we can save on few function
2335  	 * calls.
2336  	 */
2337  	if (target_freq == policy->cur &&
2338  	    !(cpufreq_driver->flags & CPUFREQ_NEED_UPDATE_LIMITS))
2339  		return 0;
2340  
2341  	if (cpufreq_driver->target) {
2342  		/*
2343  		 * If the driver hasn't setup a single inefficient frequency,
2344  		 * it's unlikely it knows how to decode CPUFREQ_RELATION_E.
2345  		 */
2346  		if (!policy->efficiencies_available)
2347  			relation &= ~CPUFREQ_RELATION_E;
2348  
2349  		return cpufreq_driver->target(policy, target_freq, relation);
2350  	}
2351  
2352  	if (!cpufreq_driver->target_index)
2353  		return -EINVAL;
2354  
2355  	return __target_index(policy, policy->cached_resolved_idx);
2356  }
2357  EXPORT_SYMBOL_GPL(__cpufreq_driver_target);
2358  
cpufreq_driver_target(struct cpufreq_policy * policy,unsigned int target_freq,unsigned int relation)2359  int cpufreq_driver_target(struct cpufreq_policy *policy,
2360  			  unsigned int target_freq,
2361  			  unsigned int relation)
2362  {
2363  	int ret;
2364  
2365  	down_write(&policy->rwsem);
2366  
2367  	ret = __cpufreq_driver_target(policy, target_freq, relation);
2368  
2369  	up_write(&policy->rwsem);
2370  
2371  	return ret;
2372  }
2373  EXPORT_SYMBOL_GPL(cpufreq_driver_target);
2374  
cpufreq_fallback_governor(void)2375  __weak struct cpufreq_governor *cpufreq_fallback_governor(void)
2376  {
2377  	return NULL;
2378  }
2379  
cpufreq_init_governor(struct cpufreq_policy * policy)2380  static int cpufreq_init_governor(struct cpufreq_policy *policy)
2381  {
2382  	int ret;
2383  
2384  	/* Don't start any governor operations if we are entering suspend */
2385  	if (cpufreq_suspended)
2386  		return 0;
2387  	/*
2388  	 * Governor might not be initiated here if ACPI _PPC changed
2389  	 * notification happened, so check it.
2390  	 */
2391  	if (!policy->governor)
2392  		return -EINVAL;
2393  
2394  	/* Platform doesn't want dynamic frequency switching ? */
2395  	if (policy->governor->flags & CPUFREQ_GOV_DYNAMIC_SWITCHING &&
2396  	    cpufreq_driver->flags & CPUFREQ_NO_AUTO_DYNAMIC_SWITCHING) {
2397  		struct cpufreq_governor *gov = cpufreq_fallback_governor();
2398  
2399  		if (gov) {
2400  			pr_warn("Can't use %s governor as dynamic switching is disallowed. Fallback to %s governor\n",
2401  				policy->governor->name, gov->name);
2402  			policy->governor = gov;
2403  		} else {
2404  			return -EINVAL;
2405  		}
2406  	}
2407  
2408  	if (!try_module_get(policy->governor->owner))
2409  		return -EINVAL;
2410  
2411  	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2412  
2413  	if (policy->governor->init) {
2414  		ret = policy->governor->init(policy);
2415  		if (ret) {
2416  			module_put(policy->governor->owner);
2417  			return ret;
2418  		}
2419  	}
2420  
2421  	policy->strict_target = !!(policy->governor->flags & CPUFREQ_GOV_STRICT_TARGET);
2422  
2423  	return 0;
2424  }
2425  
cpufreq_exit_governor(struct cpufreq_policy * policy)2426  static void cpufreq_exit_governor(struct cpufreq_policy *policy)
2427  {
2428  	if (cpufreq_suspended || !policy->governor)
2429  		return;
2430  
2431  	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2432  
2433  	if (policy->governor->exit)
2434  		policy->governor->exit(policy);
2435  
2436  	module_put(policy->governor->owner);
2437  }
2438  
cpufreq_start_governor(struct cpufreq_policy * policy)2439  int cpufreq_start_governor(struct cpufreq_policy *policy)
2440  {
2441  	int ret;
2442  
2443  	if (cpufreq_suspended)
2444  		return 0;
2445  
2446  	if (!policy->governor)
2447  		return -EINVAL;
2448  
2449  	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2450  
2451  	if (cpufreq_driver->get)
2452  		cpufreq_verify_current_freq(policy, false);
2453  
2454  	if (policy->governor->start) {
2455  		ret = policy->governor->start(policy);
2456  		if (ret)
2457  			return ret;
2458  	}
2459  
2460  	if (policy->governor->limits)
2461  		policy->governor->limits(policy);
2462  
2463  	return 0;
2464  }
2465  
cpufreq_stop_governor(struct cpufreq_policy * policy)2466  void cpufreq_stop_governor(struct cpufreq_policy *policy)
2467  {
2468  	if (cpufreq_suspended || !policy->governor)
2469  		return;
2470  
2471  	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2472  
2473  	if (policy->governor->stop)
2474  		policy->governor->stop(policy);
2475  }
2476  
cpufreq_governor_limits(struct cpufreq_policy * policy)2477  static void cpufreq_governor_limits(struct cpufreq_policy *policy)
2478  {
2479  	if (cpufreq_suspended || !policy->governor)
2480  		return;
2481  
2482  	pr_debug("%s: for CPU %u\n", __func__, policy->cpu);
2483  
2484  	if (policy->governor->limits)
2485  		policy->governor->limits(policy);
2486  }
2487  
cpufreq_register_governor(struct cpufreq_governor * governor)2488  int cpufreq_register_governor(struct cpufreq_governor *governor)
2489  {
2490  	int err;
2491  
2492  	if (!governor)
2493  		return -EINVAL;
2494  
2495  	if (cpufreq_disabled())
2496  		return -ENODEV;
2497  
2498  	mutex_lock(&cpufreq_governor_mutex);
2499  
2500  	err = -EBUSY;
2501  	if (!find_governor(governor->name)) {
2502  		err = 0;
2503  		list_add(&governor->governor_list, &cpufreq_governor_list);
2504  	}
2505  
2506  	mutex_unlock(&cpufreq_governor_mutex);
2507  	return err;
2508  }
2509  EXPORT_SYMBOL_GPL(cpufreq_register_governor);
2510  
cpufreq_unregister_governor(struct cpufreq_governor * governor)2511  void cpufreq_unregister_governor(struct cpufreq_governor *governor)
2512  {
2513  	struct cpufreq_policy *policy;
2514  	unsigned long flags;
2515  
2516  	if (!governor)
2517  		return;
2518  
2519  	if (cpufreq_disabled())
2520  		return;
2521  
2522  	/* clear last_governor for all inactive policies */
2523  	read_lock_irqsave(&cpufreq_driver_lock, flags);
2524  	for_each_inactive_policy(policy) {
2525  		if (!strcmp(policy->last_governor, governor->name)) {
2526  			policy->governor = NULL;
2527  			strcpy(policy->last_governor, "\0");
2528  		}
2529  	}
2530  	read_unlock_irqrestore(&cpufreq_driver_lock, flags);
2531  
2532  	mutex_lock(&cpufreq_governor_mutex);
2533  	list_del(&governor->governor_list);
2534  	mutex_unlock(&cpufreq_governor_mutex);
2535  }
2536  EXPORT_SYMBOL_GPL(cpufreq_unregister_governor);
2537  
2538  
2539  /*********************************************************************
2540   *                          POLICY INTERFACE                         *
2541   *********************************************************************/
2542  
2543  /**
2544   * cpufreq_get_policy - get the current cpufreq_policy
2545   * @policy: struct cpufreq_policy into which the current cpufreq_policy
2546   *	is written
2547   * @cpu: CPU to find the policy for
2548   *
2549   * Reads the current cpufreq policy.
2550   */
cpufreq_get_policy(struct cpufreq_policy * policy,unsigned int cpu)2551  int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
2552  {
2553  	struct cpufreq_policy *cpu_policy;
2554  	if (!policy)
2555  		return -EINVAL;
2556  
2557  	cpu_policy = cpufreq_cpu_get(cpu);
2558  	if (!cpu_policy)
2559  		return -EINVAL;
2560  
2561  	memcpy(policy, cpu_policy, sizeof(*policy));
2562  
2563  	cpufreq_cpu_put(cpu_policy);
2564  	return 0;
2565  }
2566  EXPORT_SYMBOL(cpufreq_get_policy);
2567  
2568  DEFINE_PER_CPU(unsigned long, cpufreq_pressure);
2569  
2570  /**
2571   * cpufreq_update_pressure() - Update cpufreq pressure for CPUs
2572   * @policy: cpufreq policy of the CPUs.
2573   *
2574   * Update the value of cpufreq pressure for all @cpus in the policy.
2575   */
cpufreq_update_pressure(struct cpufreq_policy * policy)2576  static void cpufreq_update_pressure(struct cpufreq_policy *policy)
2577  {
2578  	unsigned long max_capacity, capped_freq, pressure;
2579  	u32 max_freq;
2580  	int cpu;
2581  
2582  	cpu = cpumask_first(policy->related_cpus);
2583  	max_freq = arch_scale_freq_ref(cpu);
2584  	capped_freq = policy->max;
2585  
2586  	/*
2587  	 * Handle properly the boost frequencies, which should simply clean
2588  	 * the cpufreq pressure value.
2589  	 */
2590  	if (max_freq <= capped_freq) {
2591  		pressure = 0;
2592  	} else {
2593  		max_capacity = arch_scale_cpu_capacity(cpu);
2594  		pressure = max_capacity -
2595  			   mult_frac(max_capacity, capped_freq, max_freq);
2596  	}
2597  
2598  	for_each_cpu(cpu, policy->related_cpus)
2599  		WRITE_ONCE(per_cpu(cpufreq_pressure, cpu), pressure);
2600  }
2601  
2602  /**
2603   * cpufreq_set_policy - Modify cpufreq policy parameters.
2604   * @policy: Policy object to modify.
2605   * @new_gov: Policy governor pointer.
2606   * @new_pol: Policy value (for drivers with built-in governors).
2607   *
2608   * Invoke the cpufreq driver's ->verify() callback to sanity-check the frequency
2609   * limits to be set for the policy, update @policy with the verified limits
2610   * values and either invoke the driver's ->setpolicy() callback (if present) or
2611   * carry out a governor update for @policy.  That is, run the current governor's
2612   * ->limits() callback (if @new_gov points to the same object as the one in
2613   * @policy) or replace the governor for @policy with @new_gov.
2614   *
2615   * The cpuinfo part of @policy is not updated by this function.
2616   */
cpufreq_set_policy(struct cpufreq_policy * policy,struct cpufreq_governor * new_gov,unsigned int new_pol)2617  static int cpufreq_set_policy(struct cpufreq_policy *policy,
2618  			      struct cpufreq_governor *new_gov,
2619  			      unsigned int new_pol)
2620  {
2621  	struct cpufreq_policy_data new_data;
2622  	struct cpufreq_governor *old_gov;
2623  	int ret;
2624  
2625  	memcpy(&new_data.cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
2626  	new_data.freq_table = policy->freq_table;
2627  	new_data.cpu = policy->cpu;
2628  	/*
2629  	 * PM QoS framework collects all the requests from users and provide us
2630  	 * the final aggregated value here.
2631  	 */
2632  	new_data.min = freq_qos_read_value(&policy->constraints, FREQ_QOS_MIN);
2633  	new_data.max = freq_qos_read_value(&policy->constraints, FREQ_QOS_MAX);
2634  
2635  	pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
2636  		 new_data.cpu, new_data.min, new_data.max);
2637  
2638  	/*
2639  	 * Verify that the CPU speed can be set within these limits and make sure
2640  	 * that min <= max.
2641  	 */
2642  	ret = cpufreq_driver->verify(&new_data);
2643  	if (ret)
2644  		return ret;
2645  
2646  	/*
2647  	 * Resolve policy min/max to available frequencies. It ensures
2648  	 * no frequency resolution will neither overshoot the requested maximum
2649  	 * nor undershoot the requested minimum.
2650  	 */
2651  	policy->min = new_data.min;
2652  	policy->max = new_data.max;
2653  	policy->min = __resolve_freq(policy, policy->min, CPUFREQ_RELATION_L);
2654  	policy->max = __resolve_freq(policy, policy->max, CPUFREQ_RELATION_H);
2655  	trace_cpu_frequency_limits(policy);
2656  
2657  	cpufreq_update_pressure(policy);
2658  
2659  	policy->cached_target_freq = UINT_MAX;
2660  
2661  	pr_debug("new min and max freqs are %u - %u kHz\n",
2662  		 policy->min, policy->max);
2663  
2664  	if (cpufreq_driver->setpolicy) {
2665  		policy->policy = new_pol;
2666  		pr_debug("setting range\n");
2667  		return cpufreq_driver->setpolicy(policy);
2668  	}
2669  
2670  	if (new_gov == policy->governor) {
2671  		pr_debug("governor limits update\n");
2672  		cpufreq_governor_limits(policy);
2673  		return 0;
2674  	}
2675  
2676  	pr_debug("governor switch\n");
2677  
2678  	/* save old, working values */
2679  	old_gov = policy->governor;
2680  	/* end old governor */
2681  	if (old_gov) {
2682  		cpufreq_stop_governor(policy);
2683  		cpufreq_exit_governor(policy);
2684  	}
2685  
2686  	/* start new governor */
2687  	policy->governor = new_gov;
2688  	ret = cpufreq_init_governor(policy);
2689  	if (!ret) {
2690  		ret = cpufreq_start_governor(policy);
2691  		if (!ret) {
2692  			pr_debug("governor change\n");
2693  			return 0;
2694  		}
2695  		cpufreq_exit_governor(policy);
2696  	}
2697  
2698  	/* new governor failed, so re-start old one */
2699  	pr_debug("starting governor %s failed\n", policy->governor->name);
2700  	if (old_gov) {
2701  		policy->governor = old_gov;
2702  		if (cpufreq_init_governor(policy))
2703  			policy->governor = NULL;
2704  		else
2705  			cpufreq_start_governor(policy);
2706  	}
2707  
2708  	return ret;
2709  }
2710  
2711  /**
2712   * cpufreq_update_policy - Re-evaluate an existing cpufreq policy.
2713   * @cpu: CPU to re-evaluate the policy for.
2714   *
2715   * Update the current frequency for the cpufreq policy of @cpu and use
2716   * cpufreq_set_policy() to re-apply the min and max limits, which triggers the
2717   * evaluation of policy notifiers and the cpufreq driver's ->verify() callback
2718   * for the policy in question, among other things.
2719   */
cpufreq_update_policy(unsigned int cpu)2720  void cpufreq_update_policy(unsigned int cpu)
2721  {
2722  	struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
2723  
2724  	if (!policy)
2725  		return;
2726  
2727  	/*
2728  	 * BIOS might change freq behind our back
2729  	 * -> ask driver for current freq and notify governors about a change
2730  	 */
2731  	if (cpufreq_driver->get && has_target() &&
2732  	    (cpufreq_suspended || WARN_ON(!cpufreq_verify_current_freq(policy, false))))
2733  		goto unlock;
2734  
2735  	refresh_frequency_limits(policy);
2736  
2737  unlock:
2738  	cpufreq_cpu_release(policy);
2739  }
2740  EXPORT_SYMBOL(cpufreq_update_policy);
2741  
2742  /**
2743   * cpufreq_update_limits - Update policy limits for a given CPU.
2744   * @cpu: CPU to update the policy limits for.
2745   *
2746   * Invoke the driver's ->update_limits callback if present or call
2747   * cpufreq_update_policy() for @cpu.
2748   */
cpufreq_update_limits(unsigned int cpu)2749  void cpufreq_update_limits(unsigned int cpu)
2750  {
2751  	if (cpufreq_driver->update_limits)
2752  		cpufreq_driver->update_limits(cpu);
2753  	else
2754  		cpufreq_update_policy(cpu);
2755  }
2756  EXPORT_SYMBOL_GPL(cpufreq_update_limits);
2757  
2758  /*********************************************************************
2759   *               BOOST						     *
2760   *********************************************************************/
cpufreq_boost_set_sw(struct cpufreq_policy * policy,int state)2761  static int cpufreq_boost_set_sw(struct cpufreq_policy *policy, int state)
2762  {
2763  	int ret;
2764  
2765  	if (!policy->freq_table)
2766  		return -ENXIO;
2767  
2768  	ret = cpufreq_frequency_table_cpuinfo(policy, policy->freq_table);
2769  	if (ret) {
2770  		pr_err("%s: Policy frequency update failed\n", __func__);
2771  		return ret;
2772  	}
2773  
2774  	ret = freq_qos_update_request(policy->max_freq_req, policy->max);
2775  	if (ret < 0)
2776  		return ret;
2777  
2778  	return 0;
2779  }
2780  
cpufreq_boost_trigger_state(int state)2781  int cpufreq_boost_trigger_state(int state)
2782  {
2783  	struct cpufreq_policy *policy;
2784  	unsigned long flags;
2785  	int ret = 0;
2786  
2787  	if (cpufreq_driver->boost_enabled == state)
2788  		return 0;
2789  
2790  	write_lock_irqsave(&cpufreq_driver_lock, flags);
2791  	cpufreq_driver->boost_enabled = state;
2792  	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2793  
2794  	cpus_read_lock();
2795  	for_each_active_policy(policy) {
2796  		policy->boost_enabled = state;
2797  		ret = cpufreq_driver->set_boost(policy, state);
2798  		if (ret) {
2799  			policy->boost_enabled = !policy->boost_enabled;
2800  			goto err_reset_state;
2801  		}
2802  	}
2803  	cpus_read_unlock();
2804  
2805  	return 0;
2806  
2807  err_reset_state:
2808  	cpus_read_unlock();
2809  
2810  	write_lock_irqsave(&cpufreq_driver_lock, flags);
2811  	cpufreq_driver->boost_enabled = !state;
2812  	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2813  
2814  	pr_err("%s: Cannot %s BOOST\n",
2815  	       __func__, state ? "enable" : "disable");
2816  
2817  	return ret;
2818  }
2819  
cpufreq_boost_supported(void)2820  static bool cpufreq_boost_supported(void)
2821  {
2822  	return cpufreq_driver->set_boost;
2823  }
2824  
create_boost_sysfs_file(void)2825  static int create_boost_sysfs_file(void)
2826  {
2827  	int ret;
2828  
2829  	ret = sysfs_create_file(cpufreq_global_kobject, &boost.attr);
2830  	if (ret)
2831  		pr_err("%s: cannot register global BOOST sysfs file\n",
2832  		       __func__);
2833  
2834  	return ret;
2835  }
2836  
remove_boost_sysfs_file(void)2837  static void remove_boost_sysfs_file(void)
2838  {
2839  	if (cpufreq_boost_supported())
2840  		sysfs_remove_file(cpufreq_global_kobject, &boost.attr);
2841  }
2842  
cpufreq_enable_boost_support(void)2843  int cpufreq_enable_boost_support(void)
2844  {
2845  	if (!cpufreq_driver)
2846  		return -EINVAL;
2847  
2848  	if (cpufreq_boost_supported())
2849  		return 0;
2850  
2851  	cpufreq_driver->set_boost = cpufreq_boost_set_sw;
2852  
2853  	/* This will get removed on driver unregister */
2854  	return create_boost_sysfs_file();
2855  }
2856  EXPORT_SYMBOL_GPL(cpufreq_enable_boost_support);
2857  
cpufreq_boost_enabled(void)2858  bool cpufreq_boost_enabled(void)
2859  {
2860  	return cpufreq_driver->boost_enabled;
2861  }
2862  EXPORT_SYMBOL_GPL(cpufreq_boost_enabled);
2863  
2864  /*********************************************************************
2865   *               REGISTER / UNREGISTER CPUFREQ DRIVER                *
2866   *********************************************************************/
2867  static enum cpuhp_state hp_online;
2868  
cpuhp_cpufreq_online(unsigned int cpu)2869  static int cpuhp_cpufreq_online(unsigned int cpu)
2870  {
2871  	cpufreq_online(cpu);
2872  
2873  	return 0;
2874  }
2875  
cpuhp_cpufreq_offline(unsigned int cpu)2876  static int cpuhp_cpufreq_offline(unsigned int cpu)
2877  {
2878  	cpufreq_offline(cpu);
2879  
2880  	return 0;
2881  }
2882  
2883  /**
2884   * cpufreq_register_driver - register a CPU Frequency driver
2885   * @driver_data: A struct cpufreq_driver containing the values#
2886   * submitted by the CPU Frequency driver.
2887   *
2888   * Registers a CPU Frequency driver to this core code. This code
2889   * returns zero on success, -EEXIST when another driver got here first
2890   * (and isn't unregistered in the meantime).
2891   *
2892   */
cpufreq_register_driver(struct cpufreq_driver * driver_data)2893  int cpufreq_register_driver(struct cpufreq_driver *driver_data)
2894  {
2895  	unsigned long flags;
2896  	int ret;
2897  
2898  	if (cpufreq_disabled())
2899  		return -ENODEV;
2900  
2901  	/*
2902  	 * The cpufreq core depends heavily on the availability of device
2903  	 * structure, make sure they are available before proceeding further.
2904  	 */
2905  	if (!get_cpu_device(0))
2906  		return -EPROBE_DEFER;
2907  
2908  	if (!driver_data || !driver_data->verify || !driver_data->init ||
2909  	    !(driver_data->setpolicy || driver_data->target_index ||
2910  		    driver_data->target) ||
2911  	     (driver_data->setpolicy && (driver_data->target_index ||
2912  		    driver_data->target)) ||
2913  	     (!driver_data->get_intermediate != !driver_data->target_intermediate) ||
2914  	     (!driver_data->online != !driver_data->offline) ||
2915  		 (driver_data->adjust_perf && !driver_data->fast_switch))
2916  		return -EINVAL;
2917  
2918  	pr_debug("trying to register driver %s\n", driver_data->name);
2919  
2920  	/* Protect against concurrent CPU online/offline. */
2921  	cpus_read_lock();
2922  
2923  	write_lock_irqsave(&cpufreq_driver_lock, flags);
2924  	if (cpufreq_driver) {
2925  		write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2926  		ret = -EEXIST;
2927  		goto out;
2928  	}
2929  	cpufreq_driver = driver_data;
2930  	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2931  
2932  	/*
2933  	 * Mark support for the scheduler's frequency invariance engine for
2934  	 * drivers that implement target(), target_index() or fast_switch().
2935  	 */
2936  	if (!cpufreq_driver->setpolicy) {
2937  		static_branch_enable_cpuslocked(&cpufreq_freq_invariance);
2938  		pr_debug("supports frequency invariance");
2939  	}
2940  
2941  	if (driver_data->setpolicy)
2942  		driver_data->flags |= CPUFREQ_CONST_LOOPS;
2943  
2944  	if (cpufreq_boost_supported()) {
2945  		ret = create_boost_sysfs_file();
2946  		if (ret)
2947  			goto err_null_driver;
2948  	}
2949  
2950  	ret = subsys_interface_register(&cpufreq_interface);
2951  	if (ret)
2952  		goto err_boost_unreg;
2953  
2954  	if (unlikely(list_empty(&cpufreq_policy_list))) {
2955  		/* if all ->init() calls failed, unregister */
2956  		ret = -ENODEV;
2957  		pr_debug("%s: No CPU initialized for driver %s\n", __func__,
2958  			 driver_data->name);
2959  		goto err_if_unreg;
2960  	}
2961  
2962  	ret = cpuhp_setup_state_nocalls_cpuslocked(CPUHP_AP_ONLINE_DYN,
2963  						   "cpufreq:online",
2964  						   cpuhp_cpufreq_online,
2965  						   cpuhp_cpufreq_offline);
2966  	if (ret < 0)
2967  		goto err_if_unreg;
2968  	hp_online = ret;
2969  	ret = 0;
2970  
2971  	pr_debug("driver %s up and running\n", driver_data->name);
2972  	goto out;
2973  
2974  err_if_unreg:
2975  	subsys_interface_unregister(&cpufreq_interface);
2976  err_boost_unreg:
2977  	remove_boost_sysfs_file();
2978  err_null_driver:
2979  	write_lock_irqsave(&cpufreq_driver_lock, flags);
2980  	cpufreq_driver = NULL;
2981  	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
2982  out:
2983  	cpus_read_unlock();
2984  	return ret;
2985  }
2986  EXPORT_SYMBOL_GPL(cpufreq_register_driver);
2987  
2988  /*
2989   * cpufreq_unregister_driver - unregister the current CPUFreq driver
2990   *
2991   * Unregister the current CPUFreq driver. Only call this if you have
2992   * the right to do so, i.e. if you have succeeded in initialising before!
2993   * Returns zero if successful, and -EINVAL if the cpufreq_driver is
2994   * currently not initialised.
2995   */
cpufreq_unregister_driver(struct cpufreq_driver * driver)2996  void cpufreq_unregister_driver(struct cpufreq_driver *driver)
2997  {
2998  	unsigned long flags;
2999  
3000  	if (WARN_ON(!cpufreq_driver || (driver != cpufreq_driver)))
3001  		return;
3002  
3003  	pr_debug("unregistering driver %s\n", driver->name);
3004  
3005  	/* Protect against concurrent cpu hotplug */
3006  	cpus_read_lock();
3007  	subsys_interface_unregister(&cpufreq_interface);
3008  	remove_boost_sysfs_file();
3009  	static_branch_disable_cpuslocked(&cpufreq_freq_invariance);
3010  	cpuhp_remove_state_nocalls_cpuslocked(hp_online);
3011  
3012  	write_lock_irqsave(&cpufreq_driver_lock, flags);
3013  
3014  	cpufreq_driver = NULL;
3015  
3016  	write_unlock_irqrestore(&cpufreq_driver_lock, flags);
3017  	cpus_read_unlock();
3018  }
3019  EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
3020  
cpufreq_core_init(void)3021  static int __init cpufreq_core_init(void)
3022  {
3023  	struct cpufreq_governor *gov = cpufreq_default_governor();
3024  	struct device *dev_root;
3025  
3026  	if (cpufreq_disabled())
3027  		return -ENODEV;
3028  
3029  	dev_root = bus_get_dev_root(&cpu_subsys);
3030  	if (dev_root) {
3031  		cpufreq_global_kobject = kobject_create_and_add("cpufreq", &dev_root->kobj);
3032  		put_device(dev_root);
3033  	}
3034  	BUG_ON(!cpufreq_global_kobject);
3035  
3036  	if (!strlen(default_governor))
3037  		strscpy(default_governor, gov->name, CPUFREQ_NAME_LEN);
3038  
3039  	return 0;
3040  }
3041  module_param(off, int, 0444);
3042  module_param_string(default_governor, default_governor, CPUFREQ_NAME_LEN, 0444);
3043  core_initcall(cpufreq_core_init);
3044