1  // SPDX-License-Identifier: GPL-2.0
2  /*
3   * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
4   * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5   *
6   * Standard functionality for the common clock API.  See Documentation/driver-api/clk.rst
7   */
8  
9  #include <linux/clk.h>
10  #include <linux/clk-provider.h>
11  #include <linux/clk/clk-conf.h>
12  #include <linux/module.h>
13  #include <linux/mutex.h>
14  #include <linux/spinlock.h>
15  #include <linux/err.h>
16  #include <linux/list.h>
17  #include <linux/slab.h>
18  #include <linux/of.h>
19  #include <linux/device.h>
20  #include <linux/init.h>
21  #include <linux/pm_runtime.h>
22  #include <linux/sched.h>
23  #include <linux/clkdev.h>
24  
25  #include "clk.h"
26  
27  static DEFINE_SPINLOCK(enable_lock);
28  static DEFINE_MUTEX(prepare_lock);
29  
30  static struct task_struct *prepare_owner;
31  static struct task_struct *enable_owner;
32  
33  static int prepare_refcnt;
34  static int enable_refcnt;
35  
36  static HLIST_HEAD(clk_root_list);
37  static HLIST_HEAD(clk_orphan_list);
38  static LIST_HEAD(clk_notifier_list);
39  
40  /* List of registered clks that use runtime PM */
41  static HLIST_HEAD(clk_rpm_list);
42  static DEFINE_MUTEX(clk_rpm_list_lock);
43  
44  static const struct hlist_head *all_lists[] = {
45  	&clk_root_list,
46  	&clk_orphan_list,
47  	NULL,
48  };
49  
50  /***    private data structures    ***/
51  
52  struct clk_parent_map {
53  	const struct clk_hw	*hw;
54  	struct clk_core		*core;
55  	const char		*fw_name;
56  	const char		*name;
57  	int			index;
58  };
59  
60  struct clk_core {
61  	const char		*name;
62  	const struct clk_ops	*ops;
63  	struct clk_hw		*hw;
64  	struct module		*owner;
65  	struct device		*dev;
66  	struct hlist_node	rpm_node;
67  	struct device_node	*of_node;
68  	struct clk_core		*parent;
69  	struct clk_parent_map	*parents;
70  	u8			num_parents;
71  	u8			new_parent_index;
72  	unsigned long		rate;
73  	unsigned long		req_rate;
74  	unsigned long		new_rate;
75  	struct clk_core		*new_parent;
76  	struct clk_core		*new_child;
77  	unsigned long		flags;
78  	bool			orphan;
79  	bool			rpm_enabled;
80  	unsigned int		enable_count;
81  	unsigned int		prepare_count;
82  	unsigned int		protect_count;
83  	unsigned long		min_rate;
84  	unsigned long		max_rate;
85  	unsigned long		accuracy;
86  	int			phase;
87  	struct clk_duty		duty;
88  	struct hlist_head	children;
89  	struct hlist_node	child_node;
90  	struct hlist_head	clks;
91  	unsigned int		notifier_count;
92  #ifdef CONFIG_DEBUG_FS
93  	struct dentry		*dentry;
94  	struct hlist_node	debug_node;
95  #endif
96  	struct kref		ref;
97  };
98  
99  #define CREATE_TRACE_POINTS
100  #include <trace/events/clk.h>
101  
102  struct clk {
103  	struct clk_core	*core;
104  	struct device *dev;
105  	const char *dev_id;
106  	const char *con_id;
107  	unsigned long min_rate;
108  	unsigned long max_rate;
109  	unsigned int exclusive_count;
110  	struct hlist_node clks_node;
111  };
112  
113  /***           runtime pm          ***/
114  static int clk_pm_runtime_get(struct clk_core *core)
115  {
116  	if (!core->rpm_enabled)
117  		return 0;
118  
119  	return pm_runtime_resume_and_get(core->dev);
120  }
121  
122  static void clk_pm_runtime_put(struct clk_core *core)
123  {
124  	if (!core->rpm_enabled)
125  		return;
126  
127  	pm_runtime_put_sync(core->dev);
128  }
129  
130  /**
131   * clk_pm_runtime_get_all() - Runtime "get" all clk provider devices
132   *
133   * Call clk_pm_runtime_get() on all runtime PM enabled clks in the clk tree so
134   * that disabling unused clks avoids a deadlock where a device is runtime PM
135   * resuming/suspending and the runtime PM callback is trying to grab the
136   * prepare_lock for something like clk_prepare_enable() while
137   * clk_disable_unused_subtree() holds the prepare_lock and is trying to runtime
138   * PM resume/suspend the device as well.
139   *
140   * Context: Acquires the 'clk_rpm_list_lock' and returns with the lock held on
141   * success. Otherwise the lock is released on failure.
142   *
143   * Return: 0 on success, negative errno otherwise.
144   */
145  static int clk_pm_runtime_get_all(void)
146  {
147  	int ret;
148  	struct clk_core *core, *failed;
149  
150  	/*
151  	 * Grab the list lock to prevent any new clks from being registered
152  	 * or unregistered until clk_pm_runtime_put_all().
153  	 */
154  	mutex_lock(&clk_rpm_list_lock);
155  
156  	/*
157  	 * Runtime PM "get" all the devices that are needed for the clks
158  	 * currently registered. Do this without holding the prepare_lock, to
159  	 * avoid the deadlock.
160  	 */
161  	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
162  		ret = clk_pm_runtime_get(core);
163  		if (ret) {
164  			failed = core;
165  			pr_err("clk: Failed to runtime PM get '%s' for clk '%s'\n",
166  			       dev_name(failed->dev), failed->name);
167  			goto err;
168  		}
169  	}
170  
171  	return 0;
172  
173  err:
174  	hlist_for_each_entry(core, &clk_rpm_list, rpm_node) {
175  		if (core == failed)
176  			break;
177  
178  		clk_pm_runtime_put(core);
179  	}
180  	mutex_unlock(&clk_rpm_list_lock);
181  
182  	return ret;
183  }
184  
185  /**
186   * clk_pm_runtime_put_all() - Runtime "put" all clk provider devices
187   *
188   * Put the runtime PM references taken in clk_pm_runtime_get_all() and release
189   * the 'clk_rpm_list_lock'.
190   */
191  static void clk_pm_runtime_put_all(void)
192  {
193  	struct clk_core *core;
194  
195  	hlist_for_each_entry(core, &clk_rpm_list, rpm_node)
196  		clk_pm_runtime_put(core);
197  	mutex_unlock(&clk_rpm_list_lock);
198  }
199  
200  static void clk_pm_runtime_init(struct clk_core *core)
201  {
202  	struct device *dev = core->dev;
203  
204  	if (dev && pm_runtime_enabled(dev)) {
205  		core->rpm_enabled = true;
206  
207  		mutex_lock(&clk_rpm_list_lock);
208  		hlist_add_head(&core->rpm_node, &clk_rpm_list);
209  		mutex_unlock(&clk_rpm_list_lock);
210  	}
211  }
212  
213  /***           locking             ***/
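/*
 * Two reentrant locks are used below: the 'prepare_lock' mutex protects the
 * sleepable prepare/unprepare and rate/parent operations, while the
 * 'enable_lock' spinlock protects the atomic enable/disable path. Reentrancy
 * is open-coded via the *_owner/*_refcnt globals above: if the current task
 * already owns the lock, only the refcount is bumped instead of locking
 * again.
 */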
214  static void clk_prepare_lock(void)
215  {
216  	if (!mutex_trylock(&prepare_lock)) {
217  		if (prepare_owner == current) {
218  			prepare_refcnt++;
219  			return;
220  		}
221  		mutex_lock(&prepare_lock);
222  	}
223  	WARN_ON_ONCE(prepare_owner != NULL);
224  	WARN_ON_ONCE(prepare_refcnt != 0);
225  	prepare_owner = current;
226  	prepare_refcnt = 1;
227  }
228  
229  static void clk_prepare_unlock(void)
230  {
231  	WARN_ON_ONCE(prepare_owner != current);
232  	WARN_ON_ONCE(prepare_refcnt == 0);
233  
234  	if (--prepare_refcnt)
235  		return;
236  	prepare_owner = NULL;
237  	mutex_unlock(&prepare_lock);
238  }
239  
240  static unsigned long clk_enable_lock(void)
241  	__acquires(enable_lock)
242  {
243  	unsigned long flags;
244  
245  	/*
246  	 * On UP systems, spin_trylock_irqsave() always returns true, even if
247  	 * we already hold the lock. So, in that case, we rely only on
248  	 * reference counting.
249  	 */
250  	if (!IS_ENABLED(CONFIG_SMP) ||
251  	    !spin_trylock_irqsave(&enable_lock, flags)) {
252  		if (enable_owner == current) {
253  			enable_refcnt++;
254  			__acquire(enable_lock);
255  			if (!IS_ENABLED(CONFIG_SMP))
256  				local_save_flags(flags);
257  			return flags;
258  		}
259  		spin_lock_irqsave(&enable_lock, flags);
260  	}
261  	WARN_ON_ONCE(enable_owner != NULL);
262  	WARN_ON_ONCE(enable_refcnt != 0);
263  	enable_owner = current;
264  	enable_refcnt = 1;
265  	return flags;
266  }
267  
268  static void clk_enable_unlock(unsigned long flags)
269  	__releases(enable_lock)
270  {
271  	WARN_ON_ONCE(enable_owner != current);
272  	WARN_ON_ONCE(enable_refcnt == 0);
273  
274  	if (--enable_refcnt) {
275  		__release(enable_lock);
276  		return;
277  	}
278  	enable_owner = NULL;
279  	spin_unlock_irqrestore(&enable_lock, flags);
280  }
281  
282  static bool clk_core_rate_is_protected(struct clk_core *core)
283  {
284  	return core->protect_count;
285  }
286  
287  static bool clk_core_is_prepared(struct clk_core *core)
288  {
289  	bool ret = false;
290  
291  	/*
292  	 * .is_prepared is optional for clocks that can prepare
293  	 * .is_prepared is optional for clocks that can prepare;
294  	 * fall back to the software usage counter if it is missing
295  	if (!core->ops->is_prepared)
296  		return core->prepare_count;
297  
298  	if (!clk_pm_runtime_get(core)) {
299  		ret = core->ops->is_prepared(core->hw);
300  		clk_pm_runtime_put(core);
301  	}
302  
303  	return ret;
304  }
305  
306  static bool clk_core_is_enabled(struct clk_core *core)
307  {
308  	bool ret = false;
309  
310  	/*
311  	 * .is_enabled is only mandatory for clocks that gate;
312  	 * fall back to the software usage counter if .is_enabled is missing
313  	 */
314  	if (!core->ops->is_enabled)
315  		return core->enable_count;
316  
317  	/*
318  	 * Check if clock controller's device is runtime active before
319  	 * calling .is_enabled callback. If not, assume that clock is
320  	 * disabled, because we might be called from atomic context, from
321  	 * which pm_runtime_get() is not allowed.
322  	 * This function is called mainly from clk_disable_unused_subtree,
323  	 * which ensures proper runtime pm activation of controller before
324  	 * taking enable spinlock, but the below check is needed if one tries
325  	 * to call it from other places.
326  	 */
327  	if (core->rpm_enabled) {
328  		pm_runtime_get_noresume(core->dev);
329  		if (!pm_runtime_active(core->dev)) {
330  			ret = false;
331  			goto done;
332  		}
333  	}
334  
335  	/*
336  	 * This could be called with the enable lock held, or from atomic
337  	 * context. If the parent isn't enabled already, we can't do
338  	 * anything here. We can also assume this clock isn't enabled.
339  	 */
340  	if ((core->flags & CLK_OPS_PARENT_ENABLE) && core->parent)
341  		if (!clk_core_is_enabled(core->parent)) {
342  			ret = false;
343  			goto done;
344  		}
345  
346  	ret = core->ops->is_enabled(core->hw);
347  done:
348  	if (core->rpm_enabled)
349  		pm_runtime_put(core->dev);
350  
351  	return ret;
352  }
353  
354  /***    helper functions   ***/
355  
356  const char *__clk_get_name(const struct clk *clk)
357  {
358  	return !clk ? NULL : clk->core->name;
359  }
360  EXPORT_SYMBOL_GPL(__clk_get_name);
361  
362  const char *clk_hw_get_name(const struct clk_hw *hw)
363  {
364  	return hw->core->name;
365  }
366  EXPORT_SYMBOL_GPL(clk_hw_get_name);
367  
368  struct clk_hw *__clk_get_hw(struct clk *clk)
369  {
370  	return !clk ? NULL : clk->core->hw;
371  }
372  EXPORT_SYMBOL_GPL(__clk_get_hw);
373  
374  unsigned int clk_hw_get_num_parents(const struct clk_hw *hw)
375  {
376  	return hw->core->num_parents;
377  }
378  EXPORT_SYMBOL_GPL(clk_hw_get_num_parents);
379  
380  struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw)
381  {
382  	return hw->core->parent ? hw->core->parent->hw : NULL;
383  }
384  EXPORT_SYMBOL_GPL(clk_hw_get_parent);
385  
386  static struct clk_core *__clk_lookup_subtree(const char *name,
387  					     struct clk_core *core)
388  {
389  	struct clk_core *child;
390  	struct clk_core *ret;
391  
392  	if (!strcmp(core->name, name))
393  		return core;
394  
395  	hlist_for_each_entry(child, &core->children, child_node) {
396  		ret = __clk_lookup_subtree(name, child);
397  		if (ret)
398  			return ret;
399  	}
400  
401  	return NULL;
402  }
403  
404  static struct clk_core *clk_core_lookup(const char *name)
405  {
406  	struct clk_core *root_clk;
407  	struct clk_core *ret;
408  
409  	if (!name)
410  		return NULL;
411  
412  	/* search the 'proper' clk tree first */
413  	hlist_for_each_entry(root_clk, &clk_root_list, child_node) {
414  		ret = __clk_lookup_subtree(name, root_clk);
415  		if (ret)
416  			return ret;
417  	}
418  
419  	/* if not found, then search the orphan tree */
420  	hlist_for_each_entry(root_clk, &clk_orphan_list, child_node) {
421  		ret = __clk_lookup_subtree(name, root_clk);
422  		if (ret)
423  			return ret;
424  	}
425  
426  	return NULL;
427  }
428  
429  #ifdef CONFIG_OF
430  static int of_parse_clkspec(const struct device_node *np, int index,
431  			    const char *name, struct of_phandle_args *out_args);
432  static struct clk_hw *
433  of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec);
434  #else
435  static inline int of_parse_clkspec(const struct device_node *np, int index,
436  				   const char *name,
437  				   struct of_phandle_args *out_args)
438  {
439  	return -ENOENT;
440  }
441  static inline struct clk_hw *
442  of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
443  {
444  	return ERR_PTR(-ENOENT);
445  }
446  #endif
447  
448  /**
449   * clk_core_get - Find the clk_core parent of a clk
450   * @core: clk to find parent of
451   * @p_index: parent index to search for
452   *
453   * This is the preferred method for clk providers to find the parent of a
454   * clk when that parent is external to the clk controller. The parent_names
455   * array is indexed and treated as a local name matching a string in the device
456   * node's 'clock-names' property or as the 'con_id' matching the device's
457   * dev_name() in a clk_lookup. This allows clk providers to use their own
458   * namespace instead of looking for a globally unique parent string.
459   *
460   * For example the following DT snippet would allow a clock registered by the
461   * clock-controller@c001 that has a clk_init_data::parent_data array
462   * with 'xtal' in the 'name' member to find the clock provided by the
463   * clock-controller@f00abcd without needing to get the globally unique name of
464   * the xtal clk.
465   *
466   *      parent: clock-controller@f00abcd {
467   *              reg = <0xf00abcd 0xabcd>;
468   *              #clock-cells = <0>;
469   *      };
470   *
471   *      clock-controller@c001 {
472   *              reg = <0xc001 0xf00d>;
473   *              clocks = <&parent>;
474   *              clock-names = "xtal";
475   *              #clock-cells = <1>;
476   *      };
477   *
478   * Returns: -ENOENT when the provider can't be found or the clk doesn't
479   * exist in the provider or the name can't be found in the DT node or
480   * in a clkdev lookup. NULL when the provider knows about the clk but it
481   * isn't provided on this system.
482   * A valid clk_core pointer when the clk can be found in the provider.
483   */
484  static struct clk_core *clk_core_get(struct clk_core *core, u8 p_index)
485  {
486  	const char *name = core->parents[p_index].fw_name;
487  	int index = core->parents[p_index].index;
488  	struct clk_hw *hw = ERR_PTR(-ENOENT);
489  	struct device *dev = core->dev;
490  	const char *dev_id = dev ? dev_name(dev) : NULL;
491  	struct device_node *np = core->of_node;
492  	struct of_phandle_args clkspec;
493  
494  	if (np && (name || index >= 0) &&
495  	    !of_parse_clkspec(np, index, name, &clkspec)) {
496  		hw = of_clk_get_hw_from_clkspec(&clkspec);
497  		of_node_put(clkspec.np);
498  	} else if (name) {
499  		/*
500  		 * If the DT search above couldn't find the provider fallback to
501  		 * looking up via clkdev based clk_lookups.
502  		 */
503  		hw = clk_find_hw(dev_id, name);
504  	}
505  
506  	if (IS_ERR(hw))
507  		return ERR_CAST(hw);
508  
509  	if (!hw)
510  		return NULL;
511  
512  	return hw->core;
513  }
514  
515  static void clk_core_fill_parent_index(struct clk_core *core, u8 index)
516  {
517  	struct clk_parent_map *entry = &core->parents[index];
518  	struct clk_core *parent;
519  
520  	if (entry->hw) {
521  		parent = entry->hw->core;
522  	} else {
523  		parent = clk_core_get(core, index);
524  		if (PTR_ERR(parent) == -ENOENT && entry->name)
525  			parent = clk_core_lookup(entry->name);
526  	}
527  
528  	/*
529  	 * We have a direct reference but it isn't registered yet?
530  	 * Orphan it and let clk_reparent() update the orphan status
531  	 * when the parent is registered.
532  	 */
533  	if (!parent)
534  		parent = ERR_PTR(-EPROBE_DEFER);
535  
536  	/* Only cache it if it's not an error */
537  	if (!IS_ERR(parent))
538  		entry->core = parent;
539  }
540  
541  static struct clk_core *clk_core_get_parent_by_index(struct clk_core *core,
542  							 u8 index)
543  {
544  	if (!core || index >= core->num_parents || !core->parents)
545  		return NULL;
546  
547  	if (!core->parents[index].core)
548  		clk_core_fill_parent_index(core, index);
549  
550  	return core->parents[index].core;
551  }
552  
553  struct clk_hw *
554  clk_hw_get_parent_by_index(const struct clk_hw *hw, unsigned int index)
555  {
556  	struct clk_core *parent;
557  
558  	parent = clk_core_get_parent_by_index(hw->core, index);
559  
560  	return !parent ? NULL : parent->hw;
561  }
562  EXPORT_SYMBOL_GPL(clk_hw_get_parent_by_index);
563  
564  unsigned int __clk_get_enable_count(struct clk *clk)
565  {
566  	return !clk ? 0 : clk->core->enable_count;
567  }
568  
569  static unsigned long clk_core_get_rate_nolock(struct clk_core *core)
570  {
571  	if (!core)
572  		return 0;
573  
574  	if (!core->num_parents || core->parent)
575  		return core->rate;
576  
577  	/*
578  	 * Clk must have a parent because num_parents > 0 but the parent isn't
579  	 * known yet. Best to return 0 as the rate of this clk until we can
580  	 * properly recalc the rate based on the parent's rate.
581  	 */
582  	return 0;
583  }
584  
585  unsigned long clk_hw_get_rate(const struct clk_hw *hw)
586  {
587  	return clk_core_get_rate_nolock(hw->core);
588  }
589  EXPORT_SYMBOL_GPL(clk_hw_get_rate);
590  
591  static unsigned long clk_core_get_accuracy_no_lock(struct clk_core *core)
592  {
593  	if (!core)
594  		return 0;
595  
596  	return core->accuracy;
597  }
598  
599  unsigned long clk_hw_get_flags(const struct clk_hw *hw)
600  {
601  	return hw->core->flags;
602  }
603  EXPORT_SYMBOL_GPL(clk_hw_get_flags);
604  
605  bool clk_hw_is_prepared(const struct clk_hw *hw)
606  {
607  	return clk_core_is_prepared(hw->core);
608  }
609  EXPORT_SYMBOL_GPL(clk_hw_is_prepared);
610  
611  bool clk_hw_rate_is_protected(const struct clk_hw *hw)
612  {
613  	return clk_core_rate_is_protected(hw->core);
614  }
615  EXPORT_SYMBOL_GPL(clk_hw_rate_is_protected);
616  
617  bool clk_hw_is_enabled(const struct clk_hw *hw)
618  {
619  	return clk_core_is_enabled(hw->core);
620  }
621  EXPORT_SYMBOL_GPL(clk_hw_is_enabled);
622  
623  bool __clk_is_enabled(struct clk *clk)
624  {
625  	if (!clk)
626  		return false;
627  
628  	return clk_core_is_enabled(clk->core);
629  }
630  EXPORT_SYMBOL_GPL(__clk_is_enabled);
631  
632  static bool mux_is_better_rate(unsigned long rate, unsigned long now,
633  			   unsigned long best, unsigned long flags)
634  {
635  	if (flags & CLK_MUX_ROUND_CLOSEST)
636  		return abs(now - rate) < abs(best - rate);
637  
638  	return now <= rate && now > best;
639  }
640  
641  static void clk_core_init_rate_req(struct clk_core * const core,
642  				   struct clk_rate_request *req,
643  				   unsigned long rate);
644  
645  static int clk_core_round_rate_nolock(struct clk_core *core,
646  				      struct clk_rate_request *req);
647  
648  static bool clk_core_has_parent(struct clk_core *core, const struct clk_core *parent)
649  {
650  	struct clk_core *tmp;
651  	unsigned int i;
652  
653  	/* Optimize for the case where the parent is already the parent. */
654  	if (core->parent == parent)
655  		return true;
656  
657  	for (i = 0; i < core->num_parents; i++) {
658  		tmp = clk_core_get_parent_by_index(core, i);
659  		if (!tmp)
660  			continue;
661  
662  		if (tmp == parent)
663  			return true;
664  	}
665  
666  	return false;
667  }
668  
669  static void
670  clk_core_forward_rate_req(struct clk_core *core,
671  			  const struct clk_rate_request *old_req,
672  			  struct clk_core *parent,
673  			  struct clk_rate_request *req,
674  			  unsigned long parent_rate)
675  {
676  	if (WARN_ON(!clk_core_has_parent(core, parent)))
677  		return;
678  
679  	clk_core_init_rate_req(parent, req, parent_rate);
680  
681  	if (req->min_rate < old_req->min_rate)
682  		req->min_rate = old_req->min_rate;
683  
684  	if (req->max_rate > old_req->max_rate)
685  		req->max_rate = old_req->max_rate;
686  }
687  
688  static int
689  clk_core_determine_rate_no_reparent(struct clk_hw *hw,
690  				    struct clk_rate_request *req)
691  {
692  	struct clk_core *core = hw->core;
693  	struct clk_core *parent = core->parent;
694  	unsigned long best;
695  	int ret;
696  
697  	if (core->flags & CLK_SET_RATE_PARENT) {
698  		struct clk_rate_request parent_req;
699  
700  		if (!parent) {
701  			req->rate = 0;
702  			return 0;
703  		}
704  
705  		clk_core_forward_rate_req(core, req, parent, &parent_req,
706  					  req->rate);
707  
708  		trace_clk_rate_request_start(&parent_req);
709  
710  		ret = clk_core_round_rate_nolock(parent, &parent_req);
711  		if (ret)
712  			return ret;
713  
714  		trace_clk_rate_request_done(&parent_req);
715  
716  		best = parent_req.rate;
717  	} else if (parent) {
718  		best = clk_core_get_rate_nolock(parent);
719  	} else {
720  		best = clk_core_get_rate_nolock(core);
721  	}
722  
723  	req->best_parent_rate = best;
724  	req->rate = best;
725  
726  	return 0;
727  }
728  
729  int clk_mux_determine_rate_flags(struct clk_hw *hw,
730  				 struct clk_rate_request *req,
731  				 unsigned long flags)
732  {
733  	struct clk_core *core = hw->core, *parent, *best_parent = NULL;
734  	int i, num_parents, ret;
735  	unsigned long best = 0;
736  
737  	/* if NO_REPARENT flag set, pass through to current parent */
738  	if (core->flags & CLK_SET_RATE_NO_REPARENT)
739  		return clk_core_determine_rate_no_reparent(hw, req);
740  
741  	/* find the parent that can provide the fastest rate <= rate */
742  	num_parents = core->num_parents;
743  	for (i = 0; i < num_parents; i++) {
744  		unsigned long parent_rate;
745  
746  		parent = clk_core_get_parent_by_index(core, i);
747  		if (!parent)
748  			continue;
749  
750  		if (core->flags & CLK_SET_RATE_PARENT) {
751  			struct clk_rate_request parent_req;
752  
753  			clk_core_forward_rate_req(core, req, parent, &parent_req, req->rate);
754  
755  			trace_clk_rate_request_start(&parent_req);
756  
757  			ret = clk_core_round_rate_nolock(parent, &parent_req);
758  			if (ret)
759  				continue;
760  
761  			trace_clk_rate_request_done(&parent_req);
762  
763  			parent_rate = parent_req.rate;
764  		} else {
765  			parent_rate = clk_core_get_rate_nolock(parent);
766  		}
767  
768  		if (mux_is_better_rate(req->rate, parent_rate,
769  				       best, flags)) {
770  			best_parent = parent;
771  			best = parent_rate;
772  		}
773  	}
774  
775  	if (!best_parent)
776  		return -EINVAL;
777  
778  	req->best_parent_hw = best_parent->hw;
779  	req->best_parent_rate = best;
780  	req->rate = best;
781  
782  	return 0;
783  }
784  EXPORT_SYMBOL_GPL(clk_mux_determine_rate_flags);
785  
786  struct clk *__clk_lookup(const char *name)
787  {
788  	struct clk_core *core = clk_core_lookup(name);
789  
790  	return !core ? NULL : core->hw->clk;
791  }
792  
793  static void clk_core_get_boundaries(struct clk_core *core,
794  				    unsigned long *min_rate,
795  				    unsigned long *max_rate)
796  {
797  	struct clk *clk_user;
798  
799  	lockdep_assert_held(&prepare_lock);
800  
801  	*min_rate = core->min_rate;
802  	*max_rate = core->max_rate;
803  
804  	hlist_for_each_entry(clk_user, &core->clks, clks_node)
805  		*min_rate = max(*min_rate, clk_user->min_rate);
806  
807  	hlist_for_each_entry(clk_user, &core->clks, clks_node)
808  		*max_rate = min(*max_rate, clk_user->max_rate);
809  }
810  
811  /*
812   * clk_hw_get_rate_range() - returns the clock rate range for a hw clk
813   * @hw: the hw clk we want to get the range from
814   * @min_rate: pointer to the variable that will hold the minimum
815   * @max_rate: pointer to the variable that will hold the maximum
816   *
817   * Fills the @min_rate and @max_rate variables with the minimum and
818   * maximum that clock can reach.
819   */
820  void clk_hw_get_rate_range(struct clk_hw *hw, unsigned long *min_rate,
821  			   unsigned long *max_rate)
822  {
823  	clk_core_get_boundaries(hw->core, min_rate, max_rate);
824  }
825  EXPORT_SYMBOL_GPL(clk_hw_get_rate_range);
826  
827  static bool clk_core_check_boundaries(struct clk_core *core,
828  				      unsigned long min_rate,
829  				      unsigned long max_rate)
830  {
831  	struct clk *user;
832  
833  	lockdep_assert_held(&prepare_lock);
834  
835  	if (min_rate > core->max_rate || max_rate < core->min_rate)
836  		return false;
837  
838  	hlist_for_each_entry(user, &core->clks, clks_node)
839  		if (min_rate > user->max_rate || max_rate < user->min_rate)
840  			return false;
841  
842  	return true;
843  }
844  
845  void clk_hw_set_rate_range(struct clk_hw *hw, unsigned long min_rate,
846  			   unsigned long max_rate)
847  {
848  	hw->core->min_rate = min_rate;
849  	hw->core->max_rate = max_rate;
850  }
851  EXPORT_SYMBOL_GPL(clk_hw_set_rate_range);
852  
853  /*
854   * __clk_mux_determine_rate - clk_ops::determine_rate implementation for a mux type clk
855   * @hw: mux type clk to determine rate on
856   * @req: rate request, also used to return preferred parent and frequencies
857   *
858   * Helper for finding best parent to provide a given frequency. This can be used
859   * directly as a determine_rate callback (e.g. for a mux), or from a more
860   * complex clock that may combine a mux with other operations.
861   *
862   * Returns: 0 on success, -EERROR value on error
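 *
 * A minimal sketch of how a provider might use this directly as its
 * determine_rate callback (hypothetical 'my_mux_ops' and 'my_mux_*'
 * callbacks, shown for illustration only):
 *
 *	static const struct clk_ops my_mux_ops = {
 *		.determine_rate	= __clk_mux_determine_rate,
 *		.set_parent	= my_mux_set_parent,
 *		.get_parent	= my_mux_get_parent,
 *	};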
863   */
864  int __clk_mux_determine_rate(struct clk_hw *hw,
865  			     struct clk_rate_request *req)
866  {
867  	return clk_mux_determine_rate_flags(hw, req, 0);
868  }
869  EXPORT_SYMBOL_GPL(__clk_mux_determine_rate);
870  
871  int __clk_mux_determine_rate_closest(struct clk_hw *hw,
872  				     struct clk_rate_request *req)
873  {
874  	return clk_mux_determine_rate_flags(hw, req, CLK_MUX_ROUND_CLOSEST);
875  }
876  EXPORT_SYMBOL_GPL(__clk_mux_determine_rate_closest);
877  
878  /*
879   * clk_hw_determine_rate_no_reparent - clk_ops::determine_rate implementation for a clk that doesn't reparent
880   * @hw: mux type clk to determine rate on
881   * @req: rate request, also used to return preferred frequency
882   *
883   * Helper for finding best parent rate to provide a given frequency.
884   * This can be used directly as a determine_rate callback (e.g. for a
885   * mux), or from a more complex clock that may combine a mux with other
886   * operations.
887   *
888   * Returns: 0 on success, -EERROR value on error
889   */
890  int clk_hw_determine_rate_no_reparent(struct clk_hw *hw,
891  				      struct clk_rate_request *req)
892  {
893  	return clk_core_determine_rate_no_reparent(hw, req);
894  }
895  EXPORT_SYMBOL_GPL(clk_hw_determine_rate_no_reparent);
896  
897  /***        clk api        ***/
898  
899  static void clk_core_rate_unprotect(struct clk_core *core)
900  {
901  	lockdep_assert_held(&prepare_lock);
902  
903  	if (!core)
904  		return;
905  
906  	if (WARN(core->protect_count == 0,
907  	    "%s already unprotected\n", core->name))
908  		return;
909  
910  	if (--core->protect_count > 0)
911  		return;
912  
913  	clk_core_rate_unprotect(core->parent);
914  }
915  
916  static int clk_core_rate_nuke_protect(struct clk_core *core)
917  {
918  	int ret;
919  
920  	lockdep_assert_held(&prepare_lock);
921  
922  	if (!core)
923  		return -EINVAL;
924  
925  	if (core->protect_count == 0)
926  		return 0;
927  
928  	ret = core->protect_count;
929  	core->protect_count = 1;
930  	clk_core_rate_unprotect(core);
931  
932  	return ret;
933  }
934  
935  /**
936   * clk_rate_exclusive_put - release exclusivity over clock rate control
937   * @clk: the clk over which the exclusivity is released
938   *
939   * clk_rate_exclusive_put() completes a critical section during which a clock
940   * consumer cannot tolerate any other consumer making any operation on the
941   * clock which could result in a rate change or rate glitch. Exclusive clocks
942   * cannot have their rate changed, either directly or indirectly due to changes
943   * further up the parent chain of clocks. As a result, clocks up the parent
944   * chain also come under the exclusive control of the calling consumer.
945   *
946   * If exclusivity is claimed more than once on a clock, even by the same consumer,
947   * the rate effectively gets locked as exclusivity can't be preempted.
948   *
949   * Calls to clk_rate_exclusive_put() must be balanced with calls to
950   * clk_rate_exclusive_get(). Calls to this function may sleep, and do not return
951   * error status.
952   */
953  void clk_rate_exclusive_put(struct clk *clk)
954  {
955  	if (!clk)
956  		return;
957  
958  	clk_prepare_lock();
959  
960  	/*
961  	 * if there is something wrong with this consumer protect count, stop
962  	 * here before messing with the provider
963  	 */
964  	if (WARN_ON(clk->exclusive_count <= 0))
965  		goto out;
966  
967  	clk_core_rate_unprotect(clk->core);
968  	clk->exclusive_count--;
969  out:
970  	clk_prepare_unlock();
971  }
972  EXPORT_SYMBOL_GPL(clk_rate_exclusive_put);
973  
974  static void clk_core_rate_protect(struct clk_core *core)
975  {
976  	lockdep_assert_held(&prepare_lock);
977  
978  	if (!core)
979  		return;
980  
981  	if (core->protect_count == 0)
982  		clk_core_rate_protect(core->parent);
983  
984  	core->protect_count++;
985  }
986  
987  static void clk_core_rate_restore_protect(struct clk_core *core, int count)
988  {
989  	lockdep_assert_held(&prepare_lock);
990  
991  	if (!core)
992  		return;
993  
994  	if (count == 0)
995  		return;
996  
997  	clk_core_rate_protect(core);
998  	core->protect_count = count;
999  }
1000  
1001  /**
1002   * clk_rate_exclusive_get - get exclusivity over the clk rate control
1003   * @clk: the clk over which the exclusivity of rate control is requested
1004   *
1005   * clk_rate_exclusive_get() begins a critical section during which a clock
1006   * consumer cannot tolerate any other consumer making any operation on the
1007   * clock which could result in a rate change or rate glitch. Exclusive clocks
1008   * cannot have their rate changed, either directly or indirectly due to changes
1009   * further up the parent chain of clocks. As a result, clocks up the parent
1010   * chain also come under the exclusive control of the calling consumer.
1011   *
1012   * If exclusivity is claimed more than once on a clock, even by the same consumer,
1013   * the rate effectively gets locked as exclusivity can't be preempted.
1014   *
1015   * Calls to clk_rate_exclusive_get() should be balanced with calls to
1016   * clk_rate_exclusive_put(). Calls to this function may sleep.
1017   * Returns 0 on success, -EERROR otherwise
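 *
 * A hedged consumer-side sketch (hypothetical driver code):
 *
 *	ret = clk_rate_exclusive_get(clk);
 *	if (ret)
 *		return ret;
 *	ret = clk_set_rate(clk, 100000000);	// other consumers can't change it now
 *	...
 *	clk_rate_exclusive_put(clk);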
1018   */
1019  int clk_rate_exclusive_get(struct clk *clk)
1020  {
1021  	if (!clk)
1022  		return 0;
1023  
1024  	clk_prepare_lock();
1025  	clk_core_rate_protect(clk->core);
1026  	clk->exclusive_count++;
1027  	clk_prepare_unlock();
1028  
1029  	return 0;
1030  }
1031  EXPORT_SYMBOL_GPL(clk_rate_exclusive_get);
1032  
1033  static void devm_clk_rate_exclusive_put(void *data)
1034  {
1035  	struct clk *clk = data;
1036  
1037  	clk_rate_exclusive_put(clk);
1038  }
1039  
1040  int devm_clk_rate_exclusive_get(struct device *dev, struct clk *clk)
1041  {
1042  	int ret;
1043  
1044  	ret = clk_rate_exclusive_get(clk);
1045  	if (ret)
1046  		return ret;
1047  
1048  	return devm_add_action_or_reset(dev, devm_clk_rate_exclusive_put, clk);
1049  }
1050  EXPORT_SYMBOL_GPL(devm_clk_rate_exclusive_get);
1051  
1052  static void clk_core_unprepare(struct clk_core *core)
1053  {
1054  	lockdep_assert_held(&prepare_lock);
1055  
1056  	if (!core)
1057  		return;
1058  
1059  	if (WARN(core->prepare_count == 0,
1060  	    "%s already unprepared\n", core->name))
1061  		return;
1062  
1063  	if (WARN(core->prepare_count == 1 && core->flags & CLK_IS_CRITICAL,
1064  	    "Unpreparing critical %s\n", core->name))
1065  		return;
1066  
1067  	if (core->flags & CLK_SET_RATE_GATE)
1068  		clk_core_rate_unprotect(core);
1069  
1070  	if (--core->prepare_count > 0)
1071  		return;
1072  
1073  	WARN(core->enable_count > 0, "Unpreparing enabled %s\n", core->name);
1074  
1075  	trace_clk_unprepare(core);
1076  
1077  	if (core->ops->unprepare)
1078  		core->ops->unprepare(core->hw);
1079  
1080  	trace_clk_unprepare_complete(core);
1081  	clk_core_unprepare(core->parent);
1082  	clk_pm_runtime_put(core);
1083  }
1084  
1085  static void clk_core_unprepare_lock(struct clk_core *core)
1086  {
1087  	clk_prepare_lock();
1088  	clk_core_unprepare(core);
1089  	clk_prepare_unlock();
1090  }
1091  
1092  /**
1093   * clk_unprepare - undo preparation of a clock source
1094   * @clk: the clk being unprepared
1095   *
1096   * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
1097   * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
1098   * if the operation may sleep.  One example is a clk which is accessed over
1099   * I2C.  In the complex case a clk gate operation may require a fast and a slow
1100   * part.  It is this reason that clk_unprepare and clk_disable are not mutually
1101   * exclusive.  In fact clk_disable must be called before clk_unprepare.
1102   */
1103  void clk_unprepare(struct clk *clk)
1104  {
1105  	if (IS_ERR_OR_NULL(clk))
1106  		return;
1107  
1108  	clk_core_unprepare_lock(clk->core);
1109  }
1110  EXPORT_SYMBOL_GPL(clk_unprepare);
1111  
1112  static int clk_core_prepare(struct clk_core *core)
1113  {
1114  	int ret = 0;
1115  
1116  	lockdep_assert_held(&prepare_lock);
1117  
1118  	if (!core)
1119  		return 0;
1120  
1121  	if (core->prepare_count == 0) {
1122  		ret = clk_pm_runtime_get(core);
1123  		if (ret)
1124  			return ret;
1125  
1126  		ret = clk_core_prepare(core->parent);
1127  		if (ret)
1128  			goto runtime_put;
1129  
1130  		trace_clk_prepare(core);
1131  
1132  		if (core->ops->prepare)
1133  			ret = core->ops->prepare(core->hw);
1134  
1135  		trace_clk_prepare_complete(core);
1136  
1137  		if (ret)
1138  			goto unprepare;
1139  	}
1140  
1141  	core->prepare_count++;
1142  
1143  	/*
1144  	 * CLK_SET_RATE_GATE is a special case of clock protection.
1145  	 * Instead of a consumer claiming exclusive rate control, it is
1146  	 * actually the provider which prevents any consumer from making any
1147  	 * operation which could result in a rate change or rate glitch while
1148  	 * the clock is prepared.
1149  	 */
1150  	if (core->flags & CLK_SET_RATE_GATE)
1151  		clk_core_rate_protect(core);
1152  
1153  	return 0;
1154  unprepare:
1155  	clk_core_unprepare(core->parent);
1156  runtime_put:
1157  	clk_pm_runtime_put(core);
1158  	return ret;
1159  }
1160  
1161  static int clk_core_prepare_lock(struct clk_core *core)
1162  {
1163  	int ret;
1164  
1165  	clk_prepare_lock();
1166  	ret = clk_core_prepare(core);
1167  	clk_prepare_unlock();
1168  
1169  	return ret;
1170  }
1171  
1172  /**
1173   * clk_prepare - prepare a clock source
1174   * @clk: the clk being prepared
1175   *
1176   * clk_prepare may sleep, which differentiates it from clk_enable.  In a simple
1177   * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
1178   * operation may sleep.  One example is a clk which is accessed over I2C.  In
1179   * the complex case a clk ungate operation may require a fast and a slow part.
1180   * It is this reason that clk_prepare and clk_enable are not mutually
1181   * exclusive.  In fact clk_prepare must be called before clk_enable.
1182   * Returns 0 on success, -EERROR otherwise.
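 *
 * A minimal consumer-side sketch of the prepare/enable pairing (hypothetical
 * driver code, assuming the clk was obtained via clk_get() or similar):
 *
 *	ret = clk_prepare(clk);		// may sleep
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(clk);		// must not sleep
 *	if (ret) {
 *		clk_unprepare(clk);
 *		return ret;
 *	}
 *	...
 *	clk_disable(clk);		// gate first
 *	clk_unprepare(clk);		// then drop the prepare count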
1183   */
1184  int clk_prepare(struct clk *clk)
1185  {
1186  	if (!clk)
1187  		return 0;
1188  
1189  	return clk_core_prepare_lock(clk->core);
1190  }
1191  EXPORT_SYMBOL_GPL(clk_prepare);
1192  
1193  static void clk_core_disable(struct clk_core *core)
1194  {
1195  	lockdep_assert_held(&enable_lock);
1196  
1197  	if (!core)
1198  		return;
1199  
1200  	if (WARN(core->enable_count == 0, "%s already disabled\n", core->name))
1201  		return;
1202  
1203  	if (WARN(core->enable_count == 1 && core->flags & CLK_IS_CRITICAL,
1204  	    "Disabling critical %s\n", core->name))
1205  		return;
1206  
1207  	if (--core->enable_count > 0)
1208  		return;
1209  
1210  	trace_clk_disable(core);
1211  
1212  	if (core->ops->disable)
1213  		core->ops->disable(core->hw);
1214  
1215  	trace_clk_disable_complete(core);
1216  
1217  	clk_core_disable(core->parent);
1218  }
1219  
1220  static void clk_core_disable_lock(struct clk_core *core)
1221  {
1222  	unsigned long flags;
1223  
1224  	flags = clk_enable_lock();
1225  	clk_core_disable(core);
1226  	clk_enable_unlock(flags);
1227  }
1228  
1229  /**
1230   * clk_disable - gate a clock
1231   * @clk: the clk being gated
1232   *
1233   * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
1234   * a simple case, clk_disable can be used instead of clk_unprepare to gate a
1235   * clk if the operation is fast and will never sleep.  One example is a
1236   * SoC-internal clk which is controlled via simple register writes.  In the
1237   * complex case a clk gate operation may require a fast and a slow part.  It is
1238   * this reason that clk_unprepare and clk_disable are not mutually exclusive.
1239   * In fact clk_disable must be called before clk_unprepare.
1240   */
1241  void clk_disable(struct clk *clk)
1242  {
1243  	if (IS_ERR_OR_NULL(clk))
1244  		return;
1245  
1246  	clk_core_disable_lock(clk->core);
1247  }
1248  EXPORT_SYMBOL_GPL(clk_disable);
1249  
1250  static int clk_core_enable(struct clk_core *core)
1251  {
1252  	int ret = 0;
1253  
1254  	lockdep_assert_held(&enable_lock);
1255  
1256  	if (!core)
1257  		return 0;
1258  
1259  	if (WARN(core->prepare_count == 0,
1260  	    "Enabling unprepared %s\n", core->name))
1261  		return -ESHUTDOWN;
1262  
1263  	if (core->enable_count == 0) {
1264  		ret = clk_core_enable(core->parent);
1265  
1266  		if (ret)
1267  			return ret;
1268  
1269  		trace_clk_enable(core);
1270  
1271  		if (core->ops->enable)
1272  			ret = core->ops->enable(core->hw);
1273  
1274  		trace_clk_enable_complete(core);
1275  
1276  		if (ret) {
1277  			clk_core_disable(core->parent);
1278  			return ret;
1279  		}
1280  	}
1281  
1282  	core->enable_count++;
1283  	return 0;
1284  }
1285  
1286  static int clk_core_enable_lock(struct clk_core *core)
1287  {
1288  	unsigned long flags;
1289  	int ret;
1290  
1291  	flags = clk_enable_lock();
1292  	ret = clk_core_enable(core);
1293  	clk_enable_unlock(flags);
1294  
1295  	return ret;
1296  }
1297  
1298  /**
1299   * clk_gate_restore_context - restore context for poweroff
1300   * @hw: the clk_hw pointer of clock whose state is to be restored
1301   *
1302   * The clock gate restore context function enables or disables
1303   * the gate clocks based on the enable_count. This is done in cases
1304   * where the clock context is lost and, based on the enable_count,
1305   * the clock either needs to be enabled or disabled. This
1306   * helps restore the state of gate clocks.
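 *
 * A sketch of how a provider might hook this up in its clk_ops (hypothetical
 * 'my_gate_*' callbacks assumed):
 *
 *	static const struct clk_ops my_gate_ops = {
 *		.enable		 = my_gate_enable,
 *		.disable	 = my_gate_disable,
 *		.restore_context = clk_gate_restore_context,
 *	};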
1307   */
1308  void clk_gate_restore_context(struct clk_hw *hw)
1309  {
1310  	struct clk_core *core = hw->core;
1311  
1312  	if (core->enable_count)
1313  		core->ops->enable(hw);
1314  	else
1315  		core->ops->disable(hw);
1316  }
1317  EXPORT_SYMBOL_GPL(clk_gate_restore_context);
1318  
1319  static int clk_core_save_context(struct clk_core *core)
1320  {
1321  	struct clk_core *child;
1322  	int ret = 0;
1323  
1324  	hlist_for_each_entry(child, &core->children, child_node) {
1325  		ret = clk_core_save_context(child);
1326  		if (ret < 0)
1327  			return ret;
1328  	}
1329  
1330  	if (core->ops && core->ops->save_context)
1331  		ret = core->ops->save_context(core->hw);
1332  
1333  	return ret;
1334  }
1335  
1336  static void clk_core_restore_context(struct clk_core *core)
1337  {
1338  	struct clk_core *child;
1339  
1340  	if (core->ops && core->ops->restore_context)
1341  		core->ops->restore_context(core->hw);
1342  
1343  	hlist_for_each_entry(child, &core->children, child_node)
1344  		clk_core_restore_context(child);
1345  }
1346  
1347  /**
1348   * clk_save_context - save clock context for poweroff
1349   *
1350   * Saves the context of the clock registers for power states in which the
1351   * contents of the registers will be lost. Occurs deep within the suspend
1352   * code.  Returns 0 on success.
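 *
 * A hedged sketch of typical use (hypothetical platform suspend hooks):
 *
 *	// in a suspend_noirq handler
 *	ret = clk_save_context();
 *	...
 *	// in the matching resume_noirq handler
 *	clk_restore_context();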
1353   */
1354  int clk_save_context(void)
1355  {
1356  	struct clk_core *clk;
1357  	int ret;
1358  
1359  	hlist_for_each_entry(clk, &clk_root_list, child_node) {
1360  		ret = clk_core_save_context(clk);
1361  		if (ret < 0)
1362  			return ret;
1363  	}
1364  
1365  	hlist_for_each_entry(clk, &clk_orphan_list, child_node) {
1366  		ret = clk_core_save_context(clk);
1367  		if (ret < 0)
1368  			return ret;
1369  	}
1370  
1371  	return 0;
1372  }
1373  EXPORT_SYMBOL_GPL(clk_save_context);
1374  
1375  /**
1376   * clk_restore_context - restore clock context after poweroff
1377   *
1378   * Restore the saved clock context upon resume.
1379   *
1380   */
1381  void clk_restore_context(void)
1382  {
1383  	struct clk_core *core;
1384  
1385  	hlist_for_each_entry(core, &clk_root_list, child_node)
1386  		clk_core_restore_context(core);
1387  
1388  	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1389  		clk_core_restore_context(core);
1390  }
1391  EXPORT_SYMBOL_GPL(clk_restore_context);
1392  
1393  /**
1394   * clk_enable - ungate a clock
1395   * @clk: the clk being ungated
1396   *
1397   * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
1398   * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
1399   * if the operation will never sleep.  One example is a SoC-internal clk which
1400   * is controlled via simple register writes.  In the complex case a clk ungate
1401   * operation may require a fast and a slow part.  It is this reason that
1402   * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
1403   * must be called before clk_enable.  Returns 0 on success, -EERROR
1404   * otherwise.
1405   */
1406  int clk_enable(struct clk *clk)
1407  {
1408  	if (!clk)
1409  		return 0;
1410  
1411  	return clk_core_enable_lock(clk->core);
1412  }
1413  EXPORT_SYMBOL_GPL(clk_enable);
1414  
1415  /**
1416   * clk_is_enabled_when_prepared - indicate if preparing a clock also enables it.
1417   * @clk: clock source
1418   *
1419   * Returns true if clk_prepare() implicitly enables the clock, effectively
1420   * making clk_enable()/clk_disable() no-ops, false otherwise.
1421   *
1422   * This is of interest mainly to power management code where actually
1423   * disabling the clock also requires unpreparing it to have any material
1424   * effect.
1425   *
1426   * Regardless of the value returned here, the caller must always invoke
1427   * clk_enable() or clk_prepare_enable() and their counterparts for the usage
1428   * counts to be right.
1429   */
1430  bool clk_is_enabled_when_prepared(struct clk *clk)
1431  {
1432  	return clk && !(clk->core->ops->enable && clk->core->ops->disable);
1433  }
1434  EXPORT_SYMBOL_GPL(clk_is_enabled_when_prepared);
1435  
1436  static int clk_core_prepare_enable(struct clk_core *core)
1437  {
1438  	int ret;
1439  
1440  	ret = clk_core_prepare_lock(core);
1441  	if (ret)
1442  		return ret;
1443  
1444  	ret = clk_core_enable_lock(core);
1445  	if (ret)
1446  		clk_core_unprepare_lock(core);
1447  
1448  	return ret;
1449  }
1450  
1451  static void clk_core_disable_unprepare(struct clk_core *core)
1452  {
1453  	clk_core_disable_lock(core);
1454  	clk_core_unprepare_lock(core);
1455  }
1456  
1457  static void __init clk_unprepare_unused_subtree(struct clk_core *core)
1458  {
1459  	struct clk_core *child;
1460  
1461  	lockdep_assert_held(&prepare_lock);
1462  
1463  	hlist_for_each_entry(child, &core->children, child_node)
1464  		clk_unprepare_unused_subtree(child);
1465  
1466  	if (core->prepare_count)
1467  		return;
1468  
1469  	if (core->flags & CLK_IGNORE_UNUSED)
1470  		return;
1471  
1472  	if (clk_core_is_prepared(core)) {
1473  		trace_clk_unprepare(core);
1474  		if (core->ops->unprepare_unused)
1475  			core->ops->unprepare_unused(core->hw);
1476  		else if (core->ops->unprepare)
1477  			core->ops->unprepare(core->hw);
1478  		trace_clk_unprepare_complete(core);
1479  	}
1480  }
1481  
1482  static void __init clk_disable_unused_subtree(struct clk_core *core)
1483  {
1484  	struct clk_core *child;
1485  	unsigned long flags;
1486  
1487  	lockdep_assert_held(&prepare_lock);
1488  
1489  	hlist_for_each_entry(child, &core->children, child_node)
1490  		clk_disable_unused_subtree(child);
1491  
1492  	if (core->flags & CLK_OPS_PARENT_ENABLE)
1493  		clk_core_prepare_enable(core->parent);
1494  
1495  	flags = clk_enable_lock();
1496  
1497  	if (core->enable_count)
1498  		goto unlock_out;
1499  
1500  	if (core->flags & CLK_IGNORE_UNUSED)
1501  		goto unlock_out;
1502  
1503  	/*
1504  	 * some gate clocks have special needs during the disable-unused
1505  	 * sequence.  call .disable_unused if available, otherwise fall
1506  	 * back to .disable
1507  	 */
1508  	if (clk_core_is_enabled(core)) {
1509  		trace_clk_disable(core);
1510  		if (core->ops->disable_unused)
1511  			core->ops->disable_unused(core->hw);
1512  		else if (core->ops->disable)
1513  			core->ops->disable(core->hw);
1514  		trace_clk_disable_complete(core);
1515  	}
1516  
1517  unlock_out:
1518  	clk_enable_unlock(flags);
1519  	if (core->flags & CLK_OPS_PARENT_ENABLE)
1520  		clk_core_disable_unprepare(core->parent);
1521  }
1522  
1523  static bool clk_ignore_unused __initdata;
1524  static int __init clk_ignore_unused_setup(char *__unused)
1525  {
1526  	clk_ignore_unused = true;
1527  	return 1;
1528  }
1529  __setup("clk_ignore_unused", clk_ignore_unused_setup);
1530  
1531  static int __init clk_disable_unused(void)
1532  {
1533  	struct clk_core *core;
1534  	int ret;
1535  
1536  	if (clk_ignore_unused) {
1537  		pr_warn("clk: Not disabling unused clocks\n");
1538  		return 0;
1539  	}
1540  
1541  	pr_info("clk: Disabling unused clocks\n");
1542  
1543  	ret = clk_pm_runtime_get_all();
1544  	if (ret)
1545  		return ret;
1546  	/*
1547  	 * Grab the prepare lock to keep the clk topology stable while iterating
1548  	 * over clks.
1549  	 */
1550  	clk_prepare_lock();
1551  
1552  	hlist_for_each_entry(core, &clk_root_list, child_node)
1553  		clk_disable_unused_subtree(core);
1554  
1555  	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1556  		clk_disable_unused_subtree(core);
1557  
1558  	hlist_for_each_entry(core, &clk_root_list, child_node)
1559  		clk_unprepare_unused_subtree(core);
1560  
1561  	hlist_for_each_entry(core, &clk_orphan_list, child_node)
1562  		clk_unprepare_unused_subtree(core);
1563  
1564  	clk_prepare_unlock();
1565  
1566  	clk_pm_runtime_put_all();
1567  
1568  	return 0;
1569  }
1570  late_initcall_sync(clk_disable_unused);
1571  
1572  static int clk_core_determine_round_nolock(struct clk_core *core,
1573  					   struct clk_rate_request *req)
1574  {
1575  	long rate;
1576  
1577  	lockdep_assert_held(&prepare_lock);
1578  
1579  	if (!core)
1580  		return 0;
1581  
1582  	/*
1583  	 * Some clock providers hand-craft their clk_rate_requests and
1584  	 * might not fill min_rate and max_rate.
1585  	 *
1586  	 * If it's the case, clamping the rate is equivalent to setting
1587  	 * the rate to 0 which is bad. Skip the clamping but complain so
1588  	 * that it gets fixed, hopefully.
1589  	 */
1590  	if (!req->min_rate && !req->max_rate)
1591  		pr_warn("%s: %s: clk_rate_request has uninitialized min or max rate.\n",
1592  			__func__, core->name);
1593  	else
1594  		req->rate = clamp(req->rate, req->min_rate, req->max_rate);
1595  
1596  	/*
1597  	 * At this point, core protection will be disabled
1598  	 * - if the provider is not protected at all
1599  	 * - if the calling consumer is the only one which has exclusivity
1600  	 *   over the provider
1601  	 */
1602  	if (clk_core_rate_is_protected(core)) {
1603  		req->rate = core->rate;
1604  	} else if (core->ops->determine_rate) {
1605  		return core->ops->determine_rate(core->hw, req);
1606  	} else if (core->ops->round_rate) {
1607  		rate = core->ops->round_rate(core->hw, req->rate,
1608  					     &req->best_parent_rate);
1609  		if (rate < 0)
1610  			return rate;
1611  
1612  		req->rate = rate;
1613  	} else {
1614  		return -EINVAL;
1615  	}
1616  
1617  	return 0;
1618  }
1619  
1620  static void clk_core_init_rate_req(struct clk_core * const core,
1621  				   struct clk_rate_request *req,
1622  				   unsigned long rate)
1623  {
1624  	struct clk_core *parent;
1625  
1626  	if (WARN_ON(!req))
1627  		return;
1628  
1629  	memset(req, 0, sizeof(*req));
1630  	req->max_rate = ULONG_MAX;
1631  
1632  	if (!core)
1633  		return;
1634  
1635  	req->core = core;
1636  	req->rate = rate;
1637  	clk_core_get_boundaries(core, &req->min_rate, &req->max_rate);
1638  
1639  	parent = core->parent;
1640  	if (parent) {
1641  		req->best_parent_hw = parent->hw;
1642  		req->best_parent_rate = parent->rate;
1643  	} else {
1644  		req->best_parent_hw = NULL;
1645  		req->best_parent_rate = 0;
1646  	}
1647  }
1648  
1649  /**
1650   * clk_hw_init_rate_request - Initializes a clk_rate_request
1651   * @hw: the clk for which we want to submit a rate request
1652   * @req: the clk_rate_request structure we want to initialise
1653   * @rate: the rate which is to be requested
1654   *
1655   * Initializes a clk_rate_request structure to submit to
1656   * __clk_determine_rate() or similar functions.
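 *
 * A minimal sketch of use from provider code (hypothetical, 'target_rate'
 * assumed):
 *
 *	struct clk_rate_request req;
 *	struct clk_hw *parent = clk_hw_get_parent(hw);
 *
 *	clk_hw_init_rate_request(parent, &req, target_rate);
 *	ret = __clk_determine_rate(parent, &req);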
1657   */
1658  void clk_hw_init_rate_request(const struct clk_hw *hw,
1659  			      struct clk_rate_request *req,
1660  			      unsigned long rate)
1661  {
1662  	if (WARN_ON(!hw || !req))
1663  		return;
1664  
1665  	clk_core_init_rate_req(hw->core, req, rate);
1666  }
1667  EXPORT_SYMBOL_GPL(clk_hw_init_rate_request);
1668  
1669  /**
1670   * clk_hw_forward_rate_request - Forwards a clk_rate_request to a clock's parent
1671   * @hw: the original clock that got the rate request
1672   * @old_req: the original clk_rate_request structure we want to forward
1673   * @parent: the clk we want to forward @old_req to
1674   * @req: the clk_rate_request structure we want to initialise
1675   * @parent_rate: The rate which is to be requested to @parent
1676   *
1677   * Initializes a clk_rate_request structure to submit to a clock parent
1678   * in __clk_determine_rate() or similar functions.
1679   */
1680  void clk_hw_forward_rate_request(const struct clk_hw *hw,
1681  				 const struct clk_rate_request *old_req,
1682  				 const struct clk_hw *parent,
1683  				 struct clk_rate_request *req,
1684  				 unsigned long parent_rate)
1685  {
1686  	if (WARN_ON(!hw || !old_req || !parent || !req))
1687  		return;
1688  
1689  	clk_core_forward_rate_req(hw->core, old_req,
1690  				  parent->core, req,
1691  				  parent_rate);
1692  }
1693  EXPORT_SYMBOL_GPL(clk_hw_forward_rate_request);
1694  
1695  static bool clk_core_can_round(struct clk_core * const core)
1696  {
1697  	return core->ops->determine_rate || core->ops->round_rate;
1698  }
1699  
1700  static int clk_core_round_rate_nolock(struct clk_core *core,
1701  				      struct clk_rate_request *req)
1702  {
1703  	int ret;
1704  
1705  	lockdep_assert_held(&prepare_lock);
1706  
1707  	if (!core) {
1708  		req->rate = 0;
1709  		return 0;
1710  	}
1711  
1712  	if (clk_core_can_round(core))
1713  		return clk_core_determine_round_nolock(core, req);
1714  
1715  	if (core->flags & CLK_SET_RATE_PARENT) {
1716  		struct clk_rate_request parent_req;
1717  
1718  		clk_core_forward_rate_req(core, req, core->parent, &parent_req, req->rate);
1719  
1720  		trace_clk_rate_request_start(&parent_req);
1721  
1722  		ret = clk_core_round_rate_nolock(core->parent, &parent_req);
1723  		if (ret)
1724  			return ret;
1725  
1726  		trace_clk_rate_request_done(&parent_req);
1727  
1728  		req->best_parent_rate = parent_req.rate;
1729  		req->rate = parent_req.rate;
1730  
1731  		return 0;
1732  	}
1733  
1734  	req->rate = core->rate;
1735  	return 0;
1736  }
1737  
1738  /**
1739   * __clk_determine_rate - get the closest rate actually supported by a clock
1740   * @hw: determine the rate of this clock
1741   * @req: target rate request
1742   *
1743   * Useful for clk_ops such as .set_rate and .determine_rate.
1744   */
1745  int __clk_determine_rate(struct clk_hw *hw, struct clk_rate_request *req)
1746  {
1747  	if (!hw) {
1748  		req->rate = 0;
1749  		return 0;
1750  	}
1751  
1752  	return clk_core_round_rate_nolock(hw->core, req);
1753  }
1754  EXPORT_SYMBOL_GPL(__clk_determine_rate);
1755  
1756  /**
1757   * clk_hw_round_rate() - round the given rate for a hw clk
1758   * @hw: the hw clk for which we are rounding a rate
1759   * @rate: the rate which is to be rounded
1760   *
1761   * Takes in a rate as input and rounds it to a rate that the clk can actually
1762   * use.
1763   *
1764   * Context: prepare_lock must be held.
1765   *          For clk providers to call from within clk_ops such as .round_rate,
1766   *          .determine_rate.
1767   *
1768   * Return: returns rounded rate of hw clk if clk supports round_rate operation
1769   *         else returns the parent rate.
1770   */
1771  unsigned long clk_hw_round_rate(struct clk_hw *hw, unsigned long rate)
1772  {
1773  	int ret;
1774  	struct clk_rate_request req;
1775  
1776  	clk_core_init_rate_req(hw->core, &req, rate);
1777  
1778  	trace_clk_rate_request_start(&req);
1779  
1780  	ret = clk_core_round_rate_nolock(hw->core, &req);
1781  	if (ret)
1782  		return 0;
1783  
1784  	trace_clk_rate_request_done(&req);
1785  
1786  	return req.rate;
1787  }
1788  EXPORT_SYMBOL_GPL(clk_hw_round_rate);
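
/*
 * Example (illustrative sketch only; the foo_* names are hypothetical): a
 * fixed divide-by-two clock could use clk_hw_round_rate() in its
 * .round_rate callback to ask its parent how close it can get to twice
 * the requested rate:
 *
 *	static long foo_div2_round_rate(struct clk_hw *hw, unsigned long rate,
 *					unsigned long *parent_rate)
 *	{
 *		struct clk_hw *parent = clk_hw_get_parent(hw);
 *
 *		if (parent && (clk_hw_get_flags(hw) & CLK_SET_RATE_PARENT))
 *			*parent_rate = clk_hw_round_rate(parent, rate * 2);
 *
 *		return *parent_rate / 2;
 *	}
 */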
1789  
1790  /**
1791   * clk_round_rate - round the given rate for a clk
1792   * @clk: the clk for which we are rounding a rate
1793   * @rate: the rate which is to be rounded
1794   *
1795   * Takes in a rate as input and rounds it to a rate that the clk can actually
1796   * use which is then returned.  If clk doesn't support round_rate operation
1797   * then the parent rate is returned.
1798   */
1799  long clk_round_rate(struct clk *clk, unsigned long rate)
1800  {
1801  	struct clk_rate_request req;
1802  	int ret;
1803  
1804  	if (!clk)
1805  		return 0;
1806  
1807  	clk_prepare_lock();
1808  
1809  	if (clk->exclusive_count)
1810  		clk_core_rate_unprotect(clk->core);
1811  
1812  	clk_core_init_rate_req(clk->core, &req, rate);
1813  
1814  	trace_clk_rate_request_start(&req);
1815  
1816  	ret = clk_core_round_rate_nolock(clk->core, &req);
1817  
1818  	trace_clk_rate_request_done(&req);
1819  
1820  	if (clk->exclusive_count)
1821  		clk_core_rate_protect(clk->core);
1822  
1823  	clk_prepare_unlock();
1824  
1825  	if (ret)
1826  		return ret;
1827  
1828  	return req.rate;
1829  }
1830  EXPORT_SYMBOL_GPL(clk_round_rate);
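
/*
 * Example (illustrative sketch only; the clock and the 48 MHz target are
 * hypothetical): a consumer can query the achievable rate first and only
 * then commit to it:
 *
 *	long rounded = clk_round_rate(clk, 48000000);
 *
 *	if (rounded > 0 && clk_set_rate(clk, rounded) == 0)
 *		pr_debug("clock now runs at %lu Hz\n", clk_get_rate(clk));
 */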
1831  
1832  /**
1833   * __clk_notify - call clk notifier chain
1834   * @core: clk that is changing rate
1835   * @msg: clk notifier type (see include/linux/clk.h)
1836   * @old_rate: old clk rate
1837   * @new_rate: new clk rate
1838   *
1839   * Triggers a notifier call chain on the clk rate-change notification
1840   * for 'clk'.  Passes a pointer to the struct clk and the previous
1841   * and current rates to the notifier callback.  Intended to be called by
1842   * internal clock code only.  Returns NOTIFY_DONE from the last driver
1843   * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
1844   * a driver returns that.
1845   */
1846  static int __clk_notify(struct clk_core *core, unsigned long msg,
1847  		unsigned long old_rate, unsigned long new_rate)
1848  {
1849  	struct clk_notifier *cn;
1850  	struct clk_notifier_data cnd;
1851  	int ret = NOTIFY_DONE;
1852  
1853  	cnd.old_rate = old_rate;
1854  	cnd.new_rate = new_rate;
1855  
1856  	list_for_each_entry(cn, &clk_notifier_list, node) {
1857  		if (cn->clk->core == core) {
1858  			cnd.clk = cn->clk;
1859  			ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
1860  					&cnd);
1861  			if (ret & NOTIFY_STOP_MASK)
1862  				return ret;
1863  		}
1864  	}
1865  
1866  	return ret;
1867  }
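
/*
 * Example (illustrative sketch only; the callback name and the 200 MHz
 * limit are hypothetical): a consumer that registered a notifier with
 * clk_notifier_register() receives the clk_notifier_data shown above and
 * may veto a change from its PRE_RATE_CHANGE handler:
 *
 *	static int foo_clk_notifier_cb(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		if (event == PRE_RATE_CHANGE && ndata->new_rate > 200000000)
 *			return NOTIFY_BAD;	// rate too high for this consumer
 *
 *		return NOTIFY_OK;
 *	}
 */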
1868  
1869  /**
1870   * __clk_recalc_accuracies
1871   * @core: first clk in the subtree
1872   *
1873   * Walks the subtree of clks starting with clk and recalculates accuracies as
1874   * it goes.  Note that if a clk does not implement the .recalc_accuracy
1875   * callback then it is assumed that the clock will take on the accuracy of its
1876   * parent.
1877   */
1878  static void __clk_recalc_accuracies(struct clk_core *core)
1879  {
1880  	unsigned long parent_accuracy = 0;
1881  	struct clk_core *child;
1882  
1883  	lockdep_assert_held(&prepare_lock);
1884  
1885  	if (core->parent)
1886  		parent_accuracy = core->parent->accuracy;
1887  
1888  	if (core->ops->recalc_accuracy)
1889  		core->accuracy = core->ops->recalc_accuracy(core->hw,
1890  							  parent_accuracy);
1891  	else
1892  		core->accuracy = parent_accuracy;
1893  
1894  	hlist_for_each_entry(child, &core->children, child_node)
1895  		__clk_recalc_accuracies(child);
1896  }
1897  
1898  static long clk_core_get_accuracy_recalc(struct clk_core *core)
1899  {
1900  	if (core && (core->flags & CLK_GET_ACCURACY_NOCACHE))
1901  		__clk_recalc_accuracies(core);
1902  
1903  	return clk_core_get_accuracy_no_lock(core);
1904  }
1905  
1906  /**
1907   * clk_get_accuracy - return the accuracy of clk
1908   * @clk: the clk whose accuracy is being returned
1909   *
1910   * Simply returns the cached accuracy of the clk, unless
1911   * CLK_GET_ACCURACY_NOCACHE flag is set, which means a recalc_accuracy will be
1912   * issued.
1913   * If clk is NULL then returns 0.
1914   */
1915  long clk_get_accuracy(struct clk *clk)
1916  {
1917  	long accuracy;
1918  
1919  	if (!clk)
1920  		return 0;
1921  
1922  	clk_prepare_lock();
1923  	accuracy = clk_core_get_accuracy_recalc(clk->core);
1924  	clk_prepare_unlock();
1925  
1926  	return accuracy;
1927  }
1928  EXPORT_SYMBOL_GPL(clk_get_accuracy);
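
/*
 * Example (illustrative sketch only): accuracy is expressed in parts per
 * billion, so the worst-case frequency error of a clock can be estimated
 * from its current rate:
 *
 *	unsigned long rate = clk_get_rate(clk);
 *	long ppb = clk_get_accuracy(clk);
 *	unsigned long error_hz = mult_frac(rate, (unsigned long)ppb,
 *					   1000000000UL);
 */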
1929  
1930  static unsigned long clk_recalc(struct clk_core *core,
1931  				unsigned long parent_rate)
1932  {
1933  	unsigned long rate = parent_rate;
1934  
1935  	if (core->ops->recalc_rate && !clk_pm_runtime_get(core)) {
1936  		rate = core->ops->recalc_rate(core->hw, parent_rate);
1937  		clk_pm_runtime_put(core);
1938  	}
1939  	return rate;
1940  }
1941  
1942  /**
1943   * __clk_recalc_rates
1944   * @core: first clk in the subtree
1945   * @update_req: Whether req_rate should be updated with the new rate
1946   * @msg: notification type (see include/linux/clk.h)
1947   *
1948   * Walks the subtree of clks starting with clk and recalculates rates as it
1949   * goes.  Note that if a clk does not implement the .recalc_rate callback then
1950   * it is assumed that the clock will take on the rate of its parent.
1951   *
1952   * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
1953   * if necessary.
1954   */
1955  static void __clk_recalc_rates(struct clk_core *core, bool update_req,
1956  			       unsigned long msg)
1957  {
1958  	unsigned long old_rate;
1959  	unsigned long parent_rate = 0;
1960  	struct clk_core *child;
1961  
1962  	lockdep_assert_held(&prepare_lock);
1963  
1964  	old_rate = core->rate;
1965  
1966  	if (core->parent)
1967  		parent_rate = core->parent->rate;
1968  
1969  	core->rate = clk_recalc(core, parent_rate);
1970  	if (update_req)
1971  		core->req_rate = core->rate;
1972  
1973  	/*
1974  	 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
1975  	 * & ABORT_RATE_CHANGE notifiers
1976  	 */
1977  	if (core->notifier_count && msg)
1978  		__clk_notify(core, msg, old_rate, core->rate);
1979  
1980  	hlist_for_each_entry(child, &core->children, child_node)
1981  		__clk_recalc_rates(child, update_req, msg);
1982  }
1983  
1984  static unsigned long clk_core_get_rate_recalc(struct clk_core *core)
1985  {
1986  	if (core && (core->flags & CLK_GET_RATE_NOCACHE))
1987  		__clk_recalc_rates(core, false, 0);
1988  
1989  	return clk_core_get_rate_nolock(core);
1990  }
1991  
1992  /**
1993   * clk_get_rate - return the rate of clk
1994   * @clk: the clk whose rate is being returned
1995   *
1996   * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
1997   * is set, which means a recalc_rate will be issued. Can be called regardless of
1998   * whether the clock is enabled. If clk is NULL, or if an error occurred, then returns
1999   * 0.
2000   */
2001  unsigned long clk_get_rate(struct clk *clk)
2002  {
2003  	unsigned long rate;
2004  
2005  	if (!clk)
2006  		return 0;
2007  
2008  	clk_prepare_lock();
2009  	rate = clk_core_get_rate_recalc(clk->core);
2010  	clk_prepare_unlock();
2011  
2012  	return rate;
2013  }
2014  EXPORT_SYMBOL_GPL(clk_get_rate);
2015  
2016  static int clk_fetch_parent_index(struct clk_core *core,
2017  				  struct clk_core *parent)
2018  {
2019  	int i;
2020  
2021  	if (!parent)
2022  		return -EINVAL;
2023  
2024  	for (i = 0; i < core->num_parents; i++) {
2025  		/* Found it first try! */
2026  		if (core->parents[i].core == parent)
2027  			return i;
2028  
2029  		/* Something else is here, so keep looking */
2030  		if (core->parents[i].core)
2031  			continue;
2032  
2033  		/* Maybe core hasn't been cached but the hw is all we know? */
2034  		if (core->parents[i].hw) {
2035  			if (core->parents[i].hw == parent->hw)
2036  				break;
2037  
2038  			/* Didn't match, but we're expecting a clk_hw */
2039  			continue;
2040  		}
2041  
2042  		/* Maybe it hasn't been cached (clk_set_parent() path) */
2043  		if (parent == clk_core_get(core, i))
2044  			break;
2045  
2046  		/* Fallback to comparing globally unique names */
2047  		if (core->parents[i].name &&
2048  		    !strcmp(parent->name, core->parents[i].name))
2049  			break;
2050  	}
2051  
2052  	if (i == core->num_parents)
2053  		return -EINVAL;
2054  
2055  	core->parents[i].core = parent;
2056  	return i;
2057  }
2058  
2059  /**
2060   * clk_hw_get_parent_index - return the index of the parent clock
2061   * @hw: clk_hw associated with the clk being consumed
2062   *
2063   * Fetches and returns the index of parent clock. Returns -EINVAL if the given
2064   * clock does not have a current parent.
2065   */
2066  int clk_hw_get_parent_index(struct clk_hw *hw)
2067  {
2068  	struct clk_hw *parent = clk_hw_get_parent(hw);
2069  
2070  	if (WARN_ON(parent == NULL))
2071  		return -EINVAL;
2072  
2073  	return clk_fetch_parent_index(hw->core, parent->core);
2074  }
2075  EXPORT_SYMBOL_GPL(clk_hw_get_parent_index);
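
/*
 * Example (illustrative sketch only; struct foo_mux and its fields are
 * hypothetical): a provider can record which input a mux uses before
 * suspend and reprogram it on resume:
 *
 *	static int foo_mux_suspend(struct foo_mux *mux)
 *	{
 *		int idx = clk_hw_get_parent_index(&mux->hw);
 *
 *		if (idx < 0)
 *			return idx;
 *
 *		mux->saved_parent_idx = idx;
 *		return 0;
 *	}
 */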
2076  
2077  /*
2078   * Update the orphan status of @core and all its children.
2079   */
2080  static void clk_core_update_orphan_status(struct clk_core *core, bool is_orphan)
2081  {
2082  	struct clk_core *child;
2083  
2084  	core->orphan = is_orphan;
2085  
2086  	hlist_for_each_entry(child, &core->children, child_node)
2087  		clk_core_update_orphan_status(child, is_orphan);
2088  }
2089  
2090  static void clk_reparent(struct clk_core *core, struct clk_core *new_parent)
2091  {
2092  	bool was_orphan = core->orphan;
2093  
2094  	hlist_del(&core->child_node);
2095  
2096  	if (new_parent) {
2097  		bool becomes_orphan = new_parent->orphan;
2098  
2099  		/* avoid duplicate POST_RATE_CHANGE notifications */
2100  		if (new_parent->new_child == core)
2101  			new_parent->new_child = NULL;
2102  
2103  		hlist_add_head(&core->child_node, &new_parent->children);
2104  
2105  		if (was_orphan != becomes_orphan)
2106  			clk_core_update_orphan_status(core, becomes_orphan);
2107  	} else {
2108  		hlist_add_head(&core->child_node, &clk_orphan_list);
2109  		if (!was_orphan)
2110  			clk_core_update_orphan_status(core, true);
2111  	}
2112  
2113  	core->parent = new_parent;
2114  }
2115  
2116  static struct clk_core *__clk_set_parent_before(struct clk_core *core,
2117  					   struct clk_core *parent)
2118  {
2119  	unsigned long flags;
2120  	struct clk_core *old_parent = core->parent;
2121  
2122  	/*
2123  	 * 1. enable parents for CLK_OPS_PARENT_ENABLE clock
2124  	 *
2125  	 * 2. Migrate prepare state between parents and prevent race with
2126  	 * clk_enable().
2127  	 *
2128  	 * If the clock is not prepared, then a race with
2129  	 * clk_enable/disable() is impossible since we already have the
2130  	 * prepare lock (future calls to clk_enable() need to be preceded by
2131  	 * a clk_prepare()).
2132  	 *
2133  	 * If the clock is prepared, migrate the prepared state to the new
2134  	 * parent and also protect against a race with clk_enable() by
2135  	 * forcing the clock and the new parent on.  This ensures that all
2136  	 * future calls to clk_enable() are practically NOPs with respect to
2137  	 * hardware and software states.
2138  	 *
2139  	 * See also: Comment for clk_set_parent() below.
2140  	 */
2141  
2142  	/* enable old_parent & parent if CLK_OPS_PARENT_ENABLE is set */
2143  	if (core->flags & CLK_OPS_PARENT_ENABLE) {
2144  		clk_core_prepare_enable(old_parent);
2145  		clk_core_prepare_enable(parent);
2146  	}
2147  
2148  	/* migrate prepare count if > 0 */
2149  	if (core->prepare_count) {
2150  		clk_core_prepare_enable(parent);
2151  		clk_core_enable_lock(core);
2152  	}
2153  
2154  	/* update the clk tree topology */
2155  	flags = clk_enable_lock();
2156  	clk_reparent(core, parent);
2157  	clk_enable_unlock(flags);
2158  
2159  	return old_parent;
2160  }
2161  
2162  static void __clk_set_parent_after(struct clk_core *core,
2163  				   struct clk_core *parent,
2164  				   struct clk_core *old_parent)
2165  {
2166  	/*
2167  	 * Finish the migration of prepare state and undo the changes done
2168  	 * for preventing a race with clk_enable().
2169  	 */
2170  	if (core->prepare_count) {
2171  		clk_core_disable_lock(core);
2172  		clk_core_disable_unprepare(old_parent);
2173  	}
2174  
2175  	/* re-balance ref counting if CLK_OPS_PARENT_ENABLE is set */
2176  	if (core->flags & CLK_OPS_PARENT_ENABLE) {
2177  		clk_core_disable_unprepare(parent);
2178  		clk_core_disable_unprepare(old_parent);
2179  	}
2180  }
2181  
2182  static int __clk_set_parent(struct clk_core *core, struct clk_core *parent,
2183  			    u8 p_index)
2184  {
2185  	unsigned long flags;
2186  	int ret = 0;
2187  	struct clk_core *old_parent;
2188  
2189  	old_parent = __clk_set_parent_before(core, parent);
2190  
2191  	trace_clk_set_parent(core, parent);
2192  
2193  	/* change clock input source */
2194  	if (parent && core->ops->set_parent)
2195  		ret = core->ops->set_parent(core->hw, p_index);
2196  
2197  	trace_clk_set_parent_complete(core, parent);
2198  
2199  	if (ret) {
2200  		flags = clk_enable_lock();
2201  		clk_reparent(core, old_parent);
2202  		clk_enable_unlock(flags);
2203  
2204  		__clk_set_parent_after(core, old_parent, parent);
2205  
2206  		return ret;
2207  	}
2208  
2209  	__clk_set_parent_after(core, parent, old_parent);
2210  
2211  	return 0;
2212  }
2213  
2214  /**
2215   * __clk_speculate_rates
2216   * @core: first clk in the subtree
2217   * @parent_rate: the "future" rate of clk's parent
2218   *
2219   * Walks the subtree of clks starting with clk, speculating rates as it
2220   * goes and firing off PRE_RATE_CHANGE notifications as necessary.
2221   *
2222   * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
2223   * pre-rate change notifications and returns early if no clks in the
2224   * subtree have subscribed to the notifications.  Note that if a clk does not
2225   * implement the .recalc_rate callback then it is assumed that the clock will
2226   * take on the rate of its parent.
2227   */
2228  static int __clk_speculate_rates(struct clk_core *core,
2229  				 unsigned long parent_rate)
2230  {
2231  	struct clk_core *child;
2232  	unsigned long new_rate;
2233  	int ret = NOTIFY_DONE;
2234  
2235  	lockdep_assert_held(&prepare_lock);
2236  
2237  	new_rate = clk_recalc(core, parent_rate);
2238  
2239  	/* abort rate change if a driver returns NOTIFY_BAD or NOTIFY_STOP */
2240  	if (core->notifier_count)
2241  		ret = __clk_notify(core, PRE_RATE_CHANGE, core->rate, new_rate);
2242  
2243  	if (ret & NOTIFY_STOP_MASK) {
2244  		pr_debug("%s: clk notifier callback for clock %s aborted with error %d\n",
2245  				__func__, core->name, ret);
2246  		goto out;
2247  	}
2248  
2249  	hlist_for_each_entry(child, &core->children, child_node) {
2250  		ret = __clk_speculate_rates(child, new_rate);
2251  		if (ret & NOTIFY_STOP_MASK)
2252  			break;
2253  	}
2254  
2255  out:
2256  	return ret;
2257  }
2258  
2259  static void clk_calc_subtree(struct clk_core *core, unsigned long new_rate,
2260  			     struct clk_core *new_parent, u8 p_index)
2261  {
2262  	struct clk_core *child;
2263  
2264  	core->new_rate = new_rate;
2265  	core->new_parent = new_parent;
2266  	core->new_parent_index = p_index;
2267  	/* include clk in new parent's PRE_RATE_CHANGE notifications */
2268  	core->new_child = NULL;
2269  	if (new_parent && new_parent != core->parent)
2270  		new_parent->new_child = core;
2271  
2272  	hlist_for_each_entry(child, &core->children, child_node) {
2273  		child->new_rate = clk_recalc(child, new_rate);
2274  		clk_calc_subtree(child, child->new_rate, NULL, 0);
2275  	}
2276  }
2277  
2278  /*
2279   * calculate the new rates returning the topmost clock that has to be
2280   * changed.
2281   */
2282  static struct clk_core *clk_calc_new_rates(struct clk_core *core,
2283  					   unsigned long rate)
2284  {
2285  	struct clk_core *top = core;
2286  	struct clk_core *old_parent, *parent;
2287  	unsigned long best_parent_rate = 0;
2288  	unsigned long new_rate;
2289  	unsigned long min_rate;
2290  	unsigned long max_rate;
2291  	int p_index = 0;
2292  	long ret;
2293  
2294  	/* sanity */
2295  	if (IS_ERR_OR_NULL(core))
2296  		return NULL;
2297  
2298  	/* save parent rate, if it exists */
2299  	parent = old_parent = core->parent;
2300  	if (parent)
2301  		best_parent_rate = parent->rate;
2302  
2303  	clk_core_get_boundaries(core, &min_rate, &max_rate);
2304  
2305  	/* find the closest rate and parent clk/rate */
2306  	if (clk_core_can_round(core)) {
2307  		struct clk_rate_request req;
2308  
2309  		clk_core_init_rate_req(core, &req, rate);
2310  
2311  		trace_clk_rate_request_start(&req);
2312  
2313  		ret = clk_core_determine_round_nolock(core, &req);
2314  		if (ret < 0)
2315  			return NULL;
2316  
2317  		trace_clk_rate_request_done(&req);
2318  
2319  		best_parent_rate = req.best_parent_rate;
2320  		new_rate = req.rate;
2321  		parent = req.best_parent_hw ? req.best_parent_hw->core : NULL;
2322  
2323  		if (new_rate < min_rate || new_rate > max_rate)
2324  			return NULL;
2325  	} else if (!parent || !(core->flags & CLK_SET_RATE_PARENT)) {
2326  		/* pass-through clock without adjustable parent */
2327  		core->new_rate = core->rate;
2328  		return NULL;
2329  	} else {
2330  		/* pass-through clock with adjustable parent */
2331  		top = clk_calc_new_rates(parent, rate);
2332  		new_rate = parent->new_rate;
2333  		goto out;
2334  	}
2335  
2336  	/* some clocks must be gated to change parent */
2337  	if (parent != old_parent &&
2338  	    (core->flags & CLK_SET_PARENT_GATE) && core->prepare_count) {
2339  		pr_debug("%s: %s not gated but wants to reparent\n",
2340  			 __func__, core->name);
2341  		return NULL;
2342  	}
2343  
2344  	/* try finding the new parent index */
2345  	if (parent && core->num_parents > 1) {
2346  		p_index = clk_fetch_parent_index(core, parent);
2347  		if (p_index < 0) {
2348  			pr_debug("%s: clk %s can not be parent of clk %s\n",
2349  				 __func__, parent->name, core->name);
2350  			return NULL;
2351  		}
2352  	}
2353  
2354  	if ((core->flags & CLK_SET_RATE_PARENT) && parent &&
2355  	    best_parent_rate != parent->rate)
2356  		top = clk_calc_new_rates(parent, best_parent_rate);
2357  
2358  out:
2359  	clk_calc_subtree(core, new_rate, parent, p_index);
2360  
2361  	return top;
2362  }
2363  
2364  /*
2365   * Notify about rate changes in a subtree. Always walk down the whole tree
2366   * so that in case of an error we can walk down the whole tree again and
2367   * abort the change.
2368   */
2369  static struct clk_core *clk_propagate_rate_change(struct clk_core *core,
2370  						  unsigned long event)
2371  {
2372  	struct clk_core *child, *tmp_clk, *fail_clk = NULL;
2373  	int ret = NOTIFY_DONE;
2374  
2375  	if (core->rate == core->new_rate)
2376  		return NULL;
2377  
2378  	if (core->notifier_count) {
2379  		ret = __clk_notify(core, event, core->rate, core->new_rate);
2380  		if (ret & NOTIFY_STOP_MASK)
2381  			fail_clk = core;
2382  	}
2383  
2384  	hlist_for_each_entry(child, &core->children, child_node) {
2385  		/* Skip children who will be reparented to another clock */
2386  		if (child->new_parent && child->new_parent != core)
2387  			continue;
2388  		tmp_clk = clk_propagate_rate_change(child, event);
2389  		if (tmp_clk)
2390  			fail_clk = tmp_clk;
2391  	}
2392  
2393  	/* handle the new child who might not be in core->children yet */
2394  	if (core->new_child) {
2395  		tmp_clk = clk_propagate_rate_change(core->new_child, event);
2396  		if (tmp_clk)
2397  			fail_clk = tmp_clk;
2398  	}
2399  
2400  	return fail_clk;
2401  }
2402  
2403  /*
2404   * walk down a subtree and set the new rates notifying the rate
2405   * change on the way
2406   */
2407  static void clk_change_rate(struct clk_core *core)
2408  {
2409  	struct clk_core *child;
2410  	struct hlist_node *tmp;
2411  	unsigned long old_rate;
2412  	unsigned long best_parent_rate = 0;
2413  	bool skip_set_rate = false;
2414  	struct clk_core *old_parent;
2415  	struct clk_core *parent = NULL;
2416  
2417  	old_rate = core->rate;
2418  
2419  	if (core->new_parent) {
2420  		parent = core->new_parent;
2421  		best_parent_rate = core->new_parent->rate;
2422  	} else if (core->parent) {
2423  		parent = core->parent;
2424  		best_parent_rate = core->parent->rate;
2425  	}
2426  
2427  	if (clk_pm_runtime_get(core))
2428  		return;
2429  
2430  	if (core->flags & CLK_SET_RATE_UNGATE) {
2431  		clk_core_prepare(core);
2432  		clk_core_enable_lock(core);
2433  	}
2434  
2435  	if (core->new_parent && core->new_parent != core->parent) {
2436  		old_parent = __clk_set_parent_before(core, core->new_parent);
2437  		trace_clk_set_parent(core, core->new_parent);
2438  
2439  		if (core->ops->set_rate_and_parent) {
2440  			skip_set_rate = true;
2441  			core->ops->set_rate_and_parent(core->hw, core->new_rate,
2442  					best_parent_rate,
2443  					core->new_parent_index);
2444  		} else if (core->ops->set_parent) {
2445  			core->ops->set_parent(core->hw, core->new_parent_index);
2446  		}
2447  
2448  		trace_clk_set_parent_complete(core, core->new_parent);
2449  		__clk_set_parent_after(core, core->new_parent, old_parent);
2450  	}
2451  
2452  	if (core->flags & CLK_OPS_PARENT_ENABLE)
2453  		clk_core_prepare_enable(parent);
2454  
2455  	trace_clk_set_rate(core, core->new_rate);
2456  
2457  	if (!skip_set_rate && core->ops->set_rate)
2458  		core->ops->set_rate(core->hw, core->new_rate, best_parent_rate);
2459  
2460  	trace_clk_set_rate_complete(core, core->new_rate);
2461  
2462  	core->rate = clk_recalc(core, best_parent_rate);
2463  
2464  	if (core->flags & CLK_SET_RATE_UNGATE) {
2465  		clk_core_disable_lock(core);
2466  		clk_core_unprepare(core);
2467  	}
2468  
2469  	if (core->flags & CLK_OPS_PARENT_ENABLE)
2470  		clk_core_disable_unprepare(parent);
2471  
2472  	if (core->notifier_count && old_rate != core->rate)
2473  		__clk_notify(core, POST_RATE_CHANGE, old_rate, core->rate);
2474  
2475  	if (core->flags & CLK_RECALC_NEW_RATES)
2476  		(void)clk_calc_new_rates(core, core->new_rate);
2477  
2478  	/*
2479  	 * Use safe iteration, as change_rate can actually swap parents
2480  	 * for certain clock types.
2481  	 */
2482  	hlist_for_each_entry_safe(child, tmp, &core->children, child_node) {
2483  		/* Skip children who will be reparented to another clock */
2484  		if (child->new_parent && child->new_parent != core)
2485  			continue;
2486  		clk_change_rate(child);
2487  	}
2488  
2489  	/* handle the new child who might not be in core->children yet */
2490  	if (core->new_child)
2491  		clk_change_rate(core->new_child);
2492  
2493  	clk_pm_runtime_put(core);
2494  }
2495  
2496  static unsigned long clk_core_req_round_rate_nolock(struct clk_core *core,
2497  						     unsigned long req_rate)
2498  {
2499  	int ret, cnt;
2500  	struct clk_rate_request req;
2501  
2502  	lockdep_assert_held(&prepare_lock);
2503  
2504  	if (!core)
2505  		return 0;
2506  
2507  	/* simulate what the rate would be if it could be freely set */
2508  	cnt = clk_core_rate_nuke_protect(core);
2509  	if (cnt < 0)
2510  		return cnt;
2511  
2512  	clk_core_init_rate_req(core, &req, req_rate);
2513  
2514  	trace_clk_rate_request_start(&req);
2515  
2516  	ret = clk_core_round_rate_nolock(core, &req);
2517  
2518  	trace_clk_rate_request_done(&req);
2519  
2520  	/* restore the protection */
2521  	clk_core_rate_restore_protect(core, cnt);
2522  
2523  	return ret ? 0 : req.rate;
2524  }
2525  
2526  static int clk_core_set_rate_nolock(struct clk_core *core,
2527  				    unsigned long req_rate)
2528  {
2529  	struct clk_core *top, *fail_clk;
2530  	unsigned long rate;
2531  	int ret;
2532  
2533  	if (!core)
2534  		return 0;
2535  
2536  	rate = clk_core_req_round_rate_nolock(core, req_rate);
2537  
2538  	/* bail early if nothing to do */
2539  	if (rate == clk_core_get_rate_nolock(core))
2540  		return 0;
2541  
2542  	/* fail on a direct rate set of a protected provider */
2543  	if (clk_core_rate_is_protected(core))
2544  		return -EBUSY;
2545  
2546  	/* calculate new rates and get the topmost changed clock */
2547  	top = clk_calc_new_rates(core, req_rate);
2548  	if (!top)
2549  		return -EINVAL;
2550  
2551  	ret = clk_pm_runtime_get(core);
2552  	if (ret)
2553  		return ret;
2554  
2555  	/* notify that we are about to change rates */
2556  	fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
2557  	if (fail_clk) {
2558  		pr_debug("%s: failed to set %s rate\n", __func__,
2559  				fail_clk->name);
2560  		clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
2561  		ret = -EBUSY;
2562  		goto err;
2563  	}
2564  
2565  	/* change the rates */
2566  	clk_change_rate(top);
2567  
2568  	core->req_rate = req_rate;
2569  err:
2570  	clk_pm_runtime_put(core);
2571  
2572  	return ret;
2573  }
2574  
2575  /**
2576   * clk_set_rate - specify a new rate for clk
2577   * @clk: the clk whose rate is being changed
2578   * @rate: the new rate for clk
2579   *
2580   * In the simplest case clk_set_rate will only adjust the rate of clk.
2581   *
2582   * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
2583   * propagate up to clk's parent; whether or not this happens depends on the
2584   * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
2585   * after calling .round_rate then upstream parent propagation is ignored.  If
2586   * *parent_rate comes back with a new rate for clk's parent then we propagate
2587   * up to clk's parent and set its rate.  Upward propagation will continue
2588   * until either a clk does not support the CLK_SET_RATE_PARENT flag or
2589   * .round_rate stops requesting changes to clk's parent_rate.
2590   *
2591   * Rate changes are accomplished via tree traversal that also recalculates the
2592   * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
2593   *
2594   * Returns 0 on success, a negative errno otherwise.
2595   */
2596  int clk_set_rate(struct clk *clk, unsigned long rate)
2597  {
2598  	int ret;
2599  
2600  	if (!clk)
2601  		return 0;
2602  
2603  	/* prevent racing with updates to the clock topology */
2604  	clk_prepare_lock();
2605  
2606  	if (clk->exclusive_count)
2607  		clk_core_rate_unprotect(clk->core);
2608  
2609  	ret = clk_core_set_rate_nolock(clk->core, rate);
2610  
2611  	if (clk->exclusive_count)
2612  		clk_core_rate_protect(clk->core);
2613  
2614  	clk_prepare_unlock();
2615  
2616  	return ret;
2617  }
2618  EXPORT_SYMBOL_GPL(clk_set_rate);
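
/*
 * Example (illustrative sketch only; priv->refclk and the 25 MHz value are
 * hypothetical): a consumer typically sets the rate before enabling the
 * clock, which also satisfies providers that set CLK_SET_RATE_GATE:
 *
 *	ret = clk_set_rate(priv->refclk, 25000000);
 *	if (ret)
 *		return ret;
 *
 *	ret = clk_prepare_enable(priv->refclk);
 *	if (ret)
 *		return ret;
 */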
2619  
2620  /**
2621   * clk_set_rate_exclusive - specify a new rate and get exclusive control
2622   * @clk: the clk whose rate is being changed
2623   * @rate: the new rate for clk
2624   *
2625   * This is a combination of clk_set_rate() and clk_rate_exclusive_get()
2626   * within a critical section
2627   *
2628   * This can be used initially to ensure that at least 1 consumer is
2629   * satisfied when several consumers are competing for exclusivity over the
2630   * same clock provider.
2631   *
2632   * The exclusivity is not applied if setting the rate failed.
2633   *
2634   * Calls to clk_rate_exclusive_get() should be balanced with calls to
2635   * clk_rate_exclusive_put().
2636   *
2637   * Returns 0 on success, a negative errno otherwise.
2638   */
2639  int clk_set_rate_exclusive(struct clk *clk, unsigned long rate)
2640  {
2641  	int ret;
2642  
2643  	if (!clk)
2644  		return 0;
2645  
2646  	/* prevent racing with updates to the clock topology */
2647  	clk_prepare_lock();
2648  
2649  	/*
2650  	 * The temporary protection removal is deliberately not done here.
2651  	 * This function is meant to be used instead of clk_rate_protect,
2652  	 * i.e. before the consumer code path has protected the clock provider.
2653  	 */
2654  
2655  	ret = clk_core_set_rate_nolock(clk->core, rate);
2656  	if (!ret) {
2657  		clk_core_rate_protect(clk->core);
2658  		clk->exclusive_count++;
2659  	}
2660  
2661  	clk_prepare_unlock();
2662  
2663  	return ret;
2664  }
2665  EXPORT_SYMBOL_GPL(clk_set_rate_exclusive);
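
/*
 * Example (illustrative sketch only; priv->audio_clk and the rate are
 * hypothetical): a consumer that cannot tolerate rate changes underneath
 * it pins the rate and later drops the claim with clk_rate_exclusive_put():
 *
 *	ret = clk_set_rate_exclusive(priv->audio_clk, 24576000);
 *	if (ret)
 *		return ret;
 *
 *	// ... stream audio; other consumers cannot change the rate ...
 *
 *	clk_rate_exclusive_put(priv->audio_clk);
 */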
2666  
2667  static int clk_set_rate_range_nolock(struct clk *clk,
2668  				     unsigned long min,
2669  				     unsigned long max)
2670  {
2671  	int ret = 0;
2672  	unsigned long old_min, old_max, rate;
2673  
2674  	lockdep_assert_held(&prepare_lock);
2675  
2676  	if (!clk)
2677  		return 0;
2678  
2679  	trace_clk_set_rate_range(clk->core, min, max);
2680  
2681  	if (min > max) {
2682  		pr_err("%s: clk %s dev %s con %s: invalid range [%lu, %lu]\n",
2683  		       __func__, clk->core->name, clk->dev_id, clk->con_id,
2684  		       min, max);
2685  		return -EINVAL;
2686  	}
2687  
2688  	if (clk->exclusive_count)
2689  		clk_core_rate_unprotect(clk->core);
2690  
2691  	/* Save the current values in case we need to rollback the change */
2692  	old_min = clk->min_rate;
2693  	old_max = clk->max_rate;
2694  	clk->min_rate = min;
2695  	clk->max_rate = max;
2696  
2697  	if (!clk_core_check_boundaries(clk->core, min, max)) {
2698  		ret = -EINVAL;
2699  		goto out;
2700  	}
2701  
2702  	rate = clk->core->req_rate;
2703  	if (clk->core->flags & CLK_GET_RATE_NOCACHE)
2704  		rate = clk_core_get_rate_recalc(clk->core);
2705  
2706  	/*
2707  	 * Since the boundaries have been changed, let's give the
2708  	 * opportunity to the provider to adjust the clock rate based on
2709  	 * the new boundaries.
2710  	 *
2711  	 * We also need to handle the case where the clock is currently
2712  	 * outside of the boundaries. Clamping the last requested rate
2713  	 * to the current minimum and maximum will also handle this.
2714  	 *
2715  	 * FIXME:
2716  	 * There is a catch. It may fail for the usual reason (clock
2717  	 * broken, clock protected, etc) but also because:
2718  	 * - round_rate() was not favorable and fell on the wrong
2719  	 *   side of the boundary
2720  	 * - the determine_rate() callback does not really check for
2721  	 *   this corner case when determining the rate
2722  	 */
2723  	rate = clamp(rate, min, max);
2724  	ret = clk_core_set_rate_nolock(clk->core, rate);
2725  	if (ret) {
2726  		/* rollback the changes */
2727  		clk->min_rate = old_min;
2728  		clk->max_rate = old_max;
2729  	}
2730  
2731  out:
2732  	if (clk->exclusive_count)
2733  		clk_core_rate_protect(clk->core);
2734  
2735  	return ret;
2736  }
2737  
2738  /**
2739   * clk_set_rate_range - set a rate range for a clock source
2740   * @clk: clock source
2741   * @min: desired minimum clock rate in Hz, inclusive
2742   * @max: desired maximum clock rate in Hz, inclusive
2743   *
2744   * Return: 0 for success or negative errno on failure.
2745   */
2746  int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
2747  {
2748  	int ret;
2749  
2750  	if (!clk)
2751  		return 0;
2752  
2753  	clk_prepare_lock();
2754  
2755  	ret = clk_set_rate_range_nolock(clk, min, max);
2756  
2757  	clk_prepare_unlock();
2758  
2759  	return ret;
2760  }
2761  EXPORT_SYMBOL_GPL(clk_set_rate_range);
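
/*
 * Example (illustrative sketch only; priv->busclk and the bounds are
 * hypothetical): a consumer can constrain a shared clock instead of
 * forcing one exact rate, and the framework picks a rate inside the
 * window:
 *
 *	ret = clk_set_rate_range(priv->busclk, 100000000, 200000000);
 *	if (ret)
 *		return ret;
 */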
2762  
2763  /**
2764   * clk_set_min_rate - set a minimum clock rate for a clock source
2765   * @clk: clock source
2766   * @rate: desired minimum clock rate in Hz, inclusive
2767   *
2768   * Returns success (0) or negative errno.
2769   */
2770  int clk_set_min_rate(struct clk *clk, unsigned long rate)
2771  {
2772  	if (!clk)
2773  		return 0;
2774  
2775  	trace_clk_set_min_rate(clk->core, rate);
2776  
2777  	return clk_set_rate_range(clk, rate, clk->max_rate);
2778  }
2779  EXPORT_SYMBOL_GPL(clk_set_min_rate);
2780  
2781  /**
2782   * clk_set_max_rate - set a maximum clock rate for a clock source
2783   * @clk: clock source
2784   * @rate: desired maximum clock rate in Hz, inclusive
2785   *
2786   * Returns success (0) or negative errno.
2787   */
2788  int clk_set_max_rate(struct clk *clk, unsigned long rate)
2789  {
2790  	if (!clk)
2791  		return 0;
2792  
2793  	trace_clk_set_max_rate(clk->core, rate);
2794  
2795  	return clk_set_rate_range(clk, clk->min_rate, rate);
2796  }
2797  EXPORT_SYMBOL_GPL(clk_set_max_rate);
2798  
2799  /**
2800   * clk_get_parent - return the parent of a clk
2801   * @clk: the clk whose parent gets returned
2802   *
2803   * Simply returns clk->parent.  Returns NULL if clk is NULL.
2804   */
2805  struct clk *clk_get_parent(struct clk *clk)
2806  {
2807  	struct clk *parent;
2808  
2809  	if (!clk)
2810  		return NULL;
2811  
2812  	clk_prepare_lock();
2813  	/* TODO: Create a per-user clk and change callers to call clk_put */
2814  	parent = !clk->core->parent ? NULL : clk->core->parent->hw->clk;
2815  	clk_prepare_unlock();
2816  
2817  	return parent;
2818  }
2819  EXPORT_SYMBOL_GPL(clk_get_parent);
2820  
2821  static struct clk_core *__clk_init_parent(struct clk_core *core)
2822  {
2823  	u8 index = 0;
2824  
2825  	if (core->num_parents > 1 && core->ops->get_parent)
2826  		index = core->ops->get_parent(core->hw);
2827  
2828  	return clk_core_get_parent_by_index(core, index);
2829  }
2830  
2831  static void clk_core_reparent(struct clk_core *core,
2832  				  struct clk_core *new_parent)
2833  {
2834  	clk_reparent(core, new_parent);
2835  	__clk_recalc_accuracies(core);
2836  	__clk_recalc_rates(core, true, POST_RATE_CHANGE);
2837  }
2838  
2839  void clk_hw_reparent(struct clk_hw *hw, struct clk_hw *new_parent)
2840  {
2841  	if (!hw)
2842  		return;
2843  
2844  	clk_core_reparent(hw->core, !new_parent ? NULL : new_parent->core);
2845  }
2846  
2847  /**
2848   * clk_has_parent - check if a clock is a possible parent for another
2849   * @clk: clock source
2850   * @parent: parent clock source
2851   *
2852   * This function can be used in drivers that need to check that a clock can be
2853   * the parent of another without actually changing the parent.
2854   *
2855   * Returns true if @parent is a possible parent for @clk, false otherwise.
2856   */
2857  bool clk_has_parent(const struct clk *clk, const struct clk *parent)
2858  {
2859  	/* NULL clocks should be nops, so return success if either is NULL. */
2860  	if (!clk || !parent)
2861  		return true;
2862  
2863  	return clk_core_has_parent(clk->core, parent->core);
2864  }
2865  EXPORT_SYMBOL_GPL(clk_has_parent);
2866  
2867  static int clk_core_set_parent_nolock(struct clk_core *core,
2868  				      struct clk_core *parent)
2869  {
2870  	int ret = 0;
2871  	int p_index = 0;
2872  	unsigned long p_rate = 0;
2873  
2874  	lockdep_assert_held(&prepare_lock);
2875  
2876  	if (!core)
2877  		return 0;
2878  
2879  	if (core->parent == parent)
2880  		return 0;
2881  
2882  	/* verify ops for multi-parent clks */
2883  	if (core->num_parents > 1 && !core->ops->set_parent)
2884  		return -EPERM;
2885  
2886  	/* check that we are allowed to re-parent if the clock is in use */
2887  	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
2888  		return -EBUSY;
2889  
2890  	if (clk_core_rate_is_protected(core))
2891  		return -EBUSY;
2892  
2893  	/* try finding the new parent index */
2894  	if (parent) {
2895  		p_index = clk_fetch_parent_index(core, parent);
2896  		if (p_index < 0) {
2897  			pr_debug("%s: clk %s can not be parent of clk %s\n",
2898  					__func__, parent->name, core->name);
2899  			return p_index;
2900  		}
2901  		p_rate = parent->rate;
2902  	}
2903  
2904  	ret = clk_pm_runtime_get(core);
2905  	if (ret)
2906  		return ret;
2907  
2908  	/* propagate PRE_RATE_CHANGE notifications */
2909  	ret = __clk_speculate_rates(core, p_rate);
2910  
2911  	/* abort if a driver objects */
2912  	if (ret & NOTIFY_STOP_MASK)
2913  		goto runtime_put;
2914  
2915  	/* do the re-parent */
2916  	ret = __clk_set_parent(core, parent, p_index);
2917  
2918  	/* propagate rate and accuracy recalculation accordingly */
2919  	if (ret) {
2920  		__clk_recalc_rates(core, true, ABORT_RATE_CHANGE);
2921  	} else {
2922  		__clk_recalc_rates(core, true, POST_RATE_CHANGE);
2923  		__clk_recalc_accuracies(core);
2924  	}
2925  
2926  runtime_put:
2927  	clk_pm_runtime_put(core);
2928  
2929  	return ret;
2930  }
2931  
2932  int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *parent)
2933  {
2934  	return clk_core_set_parent_nolock(hw->core, parent->core);
2935  }
2936  EXPORT_SYMBOL_GPL(clk_hw_set_parent);
2937  
2938  /**
2939   * clk_set_parent - switch the parent of a mux clk
2940   * @clk: the mux clk whose input we are switching
2941   * @parent: the new input to clk
2942   *
2943   * Re-parent clk to use parent as its new input source.  If clk is in
2944   * prepared state, the clk will get enabled for the duration of this call. If
2945   * that's not acceptable for a specific clk (Eg: the consumer can't handle
2946   * that's not acceptable for a specific clk (e.g. the consumer can't handle
2947   * CLK_SET_PARENT_GATE flag to allow reparenting only when clk is unprepared.
2948   *
2949   * After successfully changing clk's parent clk_set_parent will update the
2950   * clk topology, sysfs topology and propagate rate recalculation via
2951   * __clk_recalc_rates.
2952   *
2953   * Returns 0 on success, a negative errno otherwise.
2954   */
2955  int clk_set_parent(struct clk *clk, struct clk *parent)
2956  {
2957  	int ret;
2958  
2959  	if (!clk)
2960  		return 0;
2961  
2962  	clk_prepare_lock();
2963  
2964  	if (clk->exclusive_count)
2965  		clk_core_rate_unprotect(clk->core);
2966  
2967  	ret = clk_core_set_parent_nolock(clk->core,
2968  					 parent ? parent->core : NULL);
2969  
2970  	if (clk->exclusive_count)
2971  		clk_core_rate_protect(clk->core);
2972  
2973  	clk_prepare_unlock();
2974  
2975  	return ret;
2976  }
2977  EXPORT_SYMBOL_GPL(clk_set_parent);
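
/*
 * Example (illustrative sketch only; the priv->* clocks are hypothetical):
 * a consumer can verify the topology with clk_has_parent() before
 * re-parenting a mux:
 *
 *	if (clk_has_parent(priv->mux_clk, priv->pll_clk)) {
 *		ret = clk_set_parent(priv->mux_clk, priv->pll_clk);
 *		if (ret)
 *			return ret;
 *	}
 */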
2978  
2979  static int clk_core_set_phase_nolock(struct clk_core *core, int degrees)
2980  {
2981  	int ret = -EINVAL;
2982  
2983  	lockdep_assert_held(&prepare_lock);
2984  
2985  	if (!core)
2986  		return 0;
2987  
2988  	if (clk_core_rate_is_protected(core))
2989  		return -EBUSY;
2990  
2991  	trace_clk_set_phase(core, degrees);
2992  
2993  	if (core->ops->set_phase) {
2994  		ret = core->ops->set_phase(core->hw, degrees);
2995  		if (!ret)
2996  			core->phase = degrees;
2997  	}
2998  
2999  	trace_clk_set_phase_complete(core, degrees);
3000  
3001  	return ret;
3002  }
3003  
3004  /**
3005   * clk_set_phase - adjust the phase shift of a clock signal
3006   * @clk: clock signal source
3007   * @degrees: number of degrees the signal is shifted
3008   *
3009   * Shifts the phase of a clock signal by the specified
3010   * degrees. Returns 0 on success, a negative errno otherwise.
3011   *
3012   * This function makes no distinction about the input or reference
3013   * signal that we adjust the clock signal phase against. For example,
3014   * with phase-locked loop clock signal generators we may shift phase with
3015   * respect to the feedback clock signal input, but in other cases the
3016   * clock phase may be shifted with respect to some other, unspecified
3017   * signal.
3018   *
3019   * Additionally the concept of phase shift does not propagate through
3020   * the clock tree hierarchy, which sets it apart from clock rates and
3021   * clock accuracy. A parent clock phase attribute does not have an
3022   * impact on the phase attribute of a child clock.
3023   */
3024  int clk_set_phase(struct clk *clk, int degrees)
3025  {
3026  	int ret;
3027  
3028  	if (!clk)
3029  		return 0;
3030  
3031  	/* sanity check degrees */
3032  	degrees %= 360;
3033  	if (degrees < 0)
3034  		degrees += 360;
3035  
3036  	clk_prepare_lock();
3037  
3038  	if (clk->exclusive_count)
3039  		clk_core_rate_unprotect(clk->core);
3040  
3041  	ret = clk_core_set_phase_nolock(clk->core, degrees);
3042  
3043  	if (clk->exclusive_count)
3044  		clk_core_rate_protect(clk->core);
3045  
3046  	clk_prepare_unlock();
3047  
3048  	return ret;
3049  }
3050  EXPORT_SYMBOL_GPL(clk_set_phase);
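
/*
 * Example (illustrative sketch only; priv->sample_clk is hypothetical): an
 * MMC-style consumer could shift its sample clock by a quarter period and
 * read back the value the provider actually applied:
 *
 *	ret = clk_set_phase(priv->sample_clk, 90);
 *	if (!ret)
 *		pr_debug("sample phase is now %d degrees\n",
 *			 clk_get_phase(priv->sample_clk));
 */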
3051  
3052  static int clk_core_get_phase(struct clk_core *core)
3053  {
3054  	int ret;
3055  
3056  	lockdep_assert_held(&prepare_lock);
3057  	if (!core->ops->get_phase)
3058  		return 0;
3059  
3060  	/* Always try to update cached phase if possible */
3061  	ret = core->ops->get_phase(core->hw);
3062  	if (ret >= 0)
3063  		core->phase = ret;
3064  
3065  	return ret;
3066  }
3067  
3068  /**
3069   * clk_get_phase - return the phase shift of a clock signal
3070   * @clk: clock signal source
3071   *
3072   * Returns the phase shift of a clock node in degrees, otherwise returns
3073   * a negative errno.
3074   */
3075  int clk_get_phase(struct clk *clk)
3076  {
3077  	int ret;
3078  
3079  	if (!clk)
3080  		return 0;
3081  
3082  	clk_prepare_lock();
3083  	ret = clk_core_get_phase(clk->core);
3084  	clk_prepare_unlock();
3085  
3086  	return ret;
3087  }
3088  EXPORT_SYMBOL_GPL(clk_get_phase);
3089  
3090  static void clk_core_reset_duty_cycle_nolock(struct clk_core *core)
3091  {
3092  	/* Assume a default value of 50% */
3093  	core->duty.num = 1;
3094  	core->duty.den = 2;
3095  }
3096  
3097  static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core);
3098  
3099  static int clk_core_update_duty_cycle_nolock(struct clk_core *core)
3100  {
3101  	struct clk_duty *duty = &core->duty;
3102  	int ret = 0;
3103  
3104  	if (!core->ops->get_duty_cycle)
3105  		return clk_core_update_duty_cycle_parent_nolock(core);
3106  
3107  	ret = core->ops->get_duty_cycle(core->hw, duty);
3108  	if (ret)
3109  		goto reset;
3110  
3111  	/* Don't trust the clock provider too much */
3112  	if (duty->den == 0 || duty->num > duty->den) {
3113  		ret = -EINVAL;
3114  		goto reset;
3115  	}
3116  
3117  	return 0;
3118  
3119  reset:
3120  	clk_core_reset_duty_cycle_nolock(core);
3121  	return ret;
3122  }
3123  
3124  static int clk_core_update_duty_cycle_parent_nolock(struct clk_core *core)
3125  {
3126  	int ret = 0;
3127  
3128  	if (core->parent &&
3129  	    core->flags & CLK_DUTY_CYCLE_PARENT) {
3130  		ret = clk_core_update_duty_cycle_nolock(core->parent);
3131  		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
3132  	} else {
3133  		clk_core_reset_duty_cycle_nolock(core);
3134  	}
3135  
3136  	return ret;
3137  }
3138  
3139  static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
3140  						 struct clk_duty *duty);
3141  
3142  static int clk_core_set_duty_cycle_nolock(struct clk_core *core,
3143  					  struct clk_duty *duty)
3144  {
3145  	int ret;
3146  
3147  	lockdep_assert_held(&prepare_lock);
3148  
3149  	if (clk_core_rate_is_protected(core))
3150  		return -EBUSY;
3151  
3152  	trace_clk_set_duty_cycle(core, duty);
3153  
3154  	if (!core->ops->set_duty_cycle)
3155  		return clk_core_set_duty_cycle_parent_nolock(core, duty);
3156  
3157  	ret = core->ops->set_duty_cycle(core->hw, duty);
3158  	if (!ret)
3159  		memcpy(&core->duty, duty, sizeof(*duty));
3160  
3161  	trace_clk_set_duty_cycle_complete(core, duty);
3162  
3163  	return ret;
3164  }
3165  
3166  static int clk_core_set_duty_cycle_parent_nolock(struct clk_core *core,
3167  						 struct clk_duty *duty)
3168  {
3169  	int ret = 0;
3170  
3171  	if (core->parent &&
3172  	    core->flags & (CLK_DUTY_CYCLE_PARENT | CLK_SET_RATE_PARENT)) {
3173  		ret = clk_core_set_duty_cycle_nolock(core->parent, duty);
3174  		memcpy(&core->duty, &core->parent->duty, sizeof(core->duty));
3175  	}
3176  
3177  	return ret;
3178  }
3179  
3180  /**
3181   * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
3182   * @clk: clock signal source
3183   * @num: numerator of the duty cycle ratio to be applied
3184   * @den: denominator of the duty cycle ratio to be applied
3185   *
3186   * Apply the duty cycle ratio if the ratio is valid and the clock can
3187   * perform this operation
3188   *
3189   * Returns (0) on success, a negative errno otherwise.
3190   */
3191  int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den)
3192  {
3193  	int ret;
3194  	struct clk_duty duty;
3195  
3196  	if (!clk)
3197  		return 0;
3198  
3199  	/* sanity check the ratio */
3200  	if (den == 0 || num > den)
3201  		return -EINVAL;
3202  
3203  	duty.num = num;
3204  	duty.den = den;
3205  
3206  	clk_prepare_lock();
3207  
3208  	if (clk->exclusive_count)
3209  		clk_core_rate_unprotect(clk->core);
3210  
3211  	ret = clk_core_set_duty_cycle_nolock(clk->core, &duty);
3212  
3213  	if (clk->exclusive_count)
3214  		clk_core_rate_protect(clk->core);
3215  
3216  	clk_prepare_unlock();
3217  
3218  	return ret;
3219  }
3220  EXPORT_SYMBOL_GPL(clk_set_duty_cycle);
3221  
3222  static int clk_core_get_scaled_duty_cycle(struct clk_core *core,
3223  					  unsigned int scale)
3224  {
3225  	struct clk_duty *duty = &core->duty;
3226  	int ret;
3227  
3228  	clk_prepare_lock();
3229  
3230  	ret = clk_core_update_duty_cycle_nolock(core);
3231  	if (!ret)
3232  		ret = mult_frac(scale, duty->num, duty->den);
3233  
3234  	clk_prepare_unlock();
3235  
3236  	return ret;
3237  }
3238  
3239  /**
3240   * clk_get_scaled_duty_cycle - return the duty cycle ratio of a clock signal
3241   * @clk: clock signal source
3242   * @scale: scaling factor to be applied to represent the ratio as an integer
3243   *
3244   * Returns the duty cycle ratio of a clock node multiplied by the provided
3245   * scaling factor, or negative errno on error.
3246   */
3247  int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale)
3248  {
3249  	if (!clk)
3250  		return 0;
3251  
3252  	return clk_core_get_scaled_duty_cycle(clk->core, scale);
3253  }
3254  EXPORT_SYMBOL_GPL(clk_get_scaled_duty_cycle);
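
/*
 * Example (illustrative sketch only; priv->pwm_clk is hypothetical):
 * requesting a 25% duty cycle and reading it back as a percentage
 * (scale = 100):
 *
 *	ret = clk_set_duty_cycle(priv->pwm_clk, 1, 4);
 *	if (!ret)
 *		pr_debug("duty cycle: %d%%\n",
 *			 clk_get_scaled_duty_cycle(priv->pwm_clk, 100));
 */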
3255  
3256  /**
3257   * clk_is_match - check if two clk's point to the same hardware clock
3258   * @p: clk compared against q
3259   * @q: clk compared against p
3260   *
3261   * Returns true if the two struct clk pointers both point to the same hardware
3262   * clock node. Put differently, returns true if struct clk *p and struct clk *q
3263   * share the same struct clk_core object.
3264   *
3265   * Returns false otherwise. Note that two NULL clks are treated as matching.
3266   */
3267  bool clk_is_match(const struct clk *p, const struct clk *q)
3268  {
3269  	/* trivial case: identical struct clk's or both NULL */
3270  	if (p == q)
3271  		return true;
3272  
3273  	/* true if clk->core pointers match. Avoid dereferencing garbage */
3274  	if (!IS_ERR_OR_NULL(p) && !IS_ERR_OR_NULL(q))
3275  		if (p->core == q->core)
3276  			return true;
3277  
3278  	return false;
3279  }
3280  EXPORT_SYMBOL_GPL(clk_is_match);
3281  
3282  /***        debugfs support        ***/
3283  
3284  #ifdef CONFIG_DEBUG_FS
3285  #include <linux/debugfs.h>
3286  
3287  static struct dentry *rootdir;
3288  static int inited = 0;
3289  static DEFINE_MUTEX(clk_debug_lock);
3290  static HLIST_HEAD(clk_debug_list);
3291  
3292  static struct hlist_head *orphan_list[] = {
3293  	&clk_orphan_list,
3294  	NULL,
3295  };
3296  
3297  static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
3298  				 int level)
3299  {
3300  	int phase;
3301  	struct clk *clk_user;
3302  	int multi_node = 0;
3303  
3304  	seq_printf(s, "%*s%-*s %-7d %-8d %-8d %-11lu %-10lu ",
3305  		   level * 3 + 1, "",
3306  		   35 - level * 3, c->name,
3307  		   c->enable_count, c->prepare_count, c->protect_count,
3308  		   clk_core_get_rate_recalc(c),
3309  		   clk_core_get_accuracy_recalc(c));
3310  
3311  	phase = clk_core_get_phase(c);
3312  	if (phase >= 0)
3313  		seq_printf(s, "%-5d", phase);
3314  	else
3315  		seq_puts(s, "-----");
3316  
3317  	seq_printf(s, " %-6d", clk_core_get_scaled_duty_cycle(c, 100000));
3318  
3319  	if (c->ops->is_enabled)
3320  		seq_printf(s, " %5c ", clk_core_is_enabled(c) ? 'Y' : 'N');
3321  	else if (!c->ops->enable)
3322  		seq_printf(s, " %5c ", 'Y');
3323  	else
3324  		seq_printf(s, " %5c ", '?');
3325  
3326  	hlist_for_each_entry(clk_user, &c->clks, clks_node) {
3327  		seq_printf(s, "%*s%-*s  %-25s\n",
3328  			   level * 3 + 2 + 105 * multi_node, "",
3329  			   30,
3330  			   clk_user->dev_id ? clk_user->dev_id : "deviceless",
3331  			   clk_user->con_id ? clk_user->con_id : "no_connection_id");
3332  
3333  		multi_node = 1;
3334  	}
3335  
3336  }
3337  
3338  static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
3339  				     int level)
3340  {
3341  	struct clk_core *child;
3342  
3343  	clk_summary_show_one(s, c, level);
3344  
3345  	hlist_for_each_entry(child, &c->children, child_node)
3346  		clk_summary_show_subtree(s, child, level + 1);
3347  }
3348  
3349  static int clk_summary_show(struct seq_file *s, void *data)
3350  {
3351  	struct clk_core *c;
3352  	struct hlist_head **lists = s->private;
3353  	int ret;
3354  
3355  	seq_puts(s, "                                 enable  prepare  protect                                duty  hardware                            connection\n");
3356  	seq_puts(s, "   clock                          count    count    count        rate   accuracy phase  cycle    enable   consumer                         id\n");
3357  	seq_puts(s, "---------------------------------------------------------------------------------------------------------------------------------------------\n");
3358  
3359  	ret = clk_pm_runtime_get_all();
3360  	if (ret)
3361  		return ret;
3362  
3363  	clk_prepare_lock();
3364  
3365  	for (; *lists; lists++)
3366  		hlist_for_each_entry(c, *lists, child_node)
3367  			clk_summary_show_subtree(s, c, 0);
3368  
3369  	clk_prepare_unlock();
3370  	clk_pm_runtime_put_all();
3371  
3372  	return 0;
3373  }
3374  DEFINE_SHOW_ATTRIBUTE(clk_summary);
3375  
3376  static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
3377  {
3378  	int phase;
3379  	unsigned long min_rate, max_rate;
3380  
3381  	clk_core_get_boundaries(c, &min_rate, &max_rate);
3382  
3383  	/* This should be JSON format, i.e. elements separated with a comma */
3384  	seq_printf(s, "\"%s\": { ", c->name);
3385  	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
3386  	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
3387  	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
3388  	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate_recalc(c));
3389  	seq_printf(s, "\"min_rate\": %lu,", min_rate);
3390  	seq_printf(s, "\"max_rate\": %lu,", max_rate);
3391  	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy_recalc(c));
3392  	phase = clk_core_get_phase(c);
3393  	if (phase >= 0)
3394  		seq_printf(s, "\"phase\": %d,", phase);
3395  	seq_printf(s, "\"duty_cycle\": %u",
3396  		   clk_core_get_scaled_duty_cycle(c, 100000));
3397  }
3398  
3399  static void clk_dump_subtree(struct seq_file *s, struct clk_core *c, int level)
3400  {
3401  	struct clk_core *child;
3402  
3403  	clk_dump_one(s, c, level);
3404  
3405  	hlist_for_each_entry(child, &c->children, child_node) {
3406  		seq_putc(s, ',');
3407  		clk_dump_subtree(s, child, level + 1);
3408  	}
3409  
3410  	seq_putc(s, '}');
3411  }
3412  
3413  static int clk_dump_show(struct seq_file *s, void *data)
3414  {
3415  	struct clk_core *c;
3416  	bool first_node = true;
3417  	struct hlist_head **lists = s->private;
3418  	int ret;
3419  
3420  	ret = clk_pm_runtime_get_all();
3421  	if (ret)
3422  		return ret;
3423  
3424  	seq_putc(s, '{');
3425  
3426  	clk_prepare_lock();
3427  
3428  	for (; *lists; lists++) {
3429  		hlist_for_each_entry(c, *lists, child_node) {
3430  			if (!first_node)
3431  				seq_putc(s, ',');
3432  			first_node = false;
3433  			clk_dump_subtree(s, c, 0);
3434  		}
3435  	}
3436  
3437  	clk_prepare_unlock();
3438  	clk_pm_runtime_put_all();
3439  
3440  	seq_puts(s, "}\n");
3441  	return 0;
3442  }
3443  DEFINE_SHOW_ATTRIBUTE(clk_dump);
3444  
3445  #undef CLOCK_ALLOW_WRITE_DEBUGFS
3446  #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3447  /*
3448   * This can be dangerous, therefore don't provide any real compile time
3449   * configuration option for this feature.
3450   * People who want to use this will need to modify the source code directly.
3451   */
3452  static int clk_rate_set(void *data, u64 val)
3453  {
3454  	struct clk_core *core = data;
3455  	int ret;
3456  
3457  	clk_prepare_lock();
3458  	ret = clk_core_set_rate_nolock(core, val);
3459  	clk_prepare_unlock();
3460  
3461  	return ret;
3462  }
3463  
3464  #define clk_rate_mode	0644
3465  
3466  static int clk_phase_set(void *data, u64 val)
3467  {
3468  	struct clk_core *core = data;
3469  	int degrees = do_div(val, 360);
3470  	int ret;
3471  
3472  	clk_prepare_lock();
3473  	ret = clk_core_set_phase_nolock(core, degrees);
3474  	clk_prepare_unlock();
3475  
3476  	return ret;
3477  }
3478  
3479  #define clk_phase_mode	0644
3480  
3481  static int clk_prepare_enable_set(void *data, u64 val)
3482  {
3483  	struct clk_core *core = data;
3484  	int ret = 0;
3485  
3486  	if (val)
3487  		ret = clk_prepare_enable(core->hw->clk);
3488  	else
3489  		clk_disable_unprepare(core->hw->clk);
3490  
3491  	return ret;
3492  }
3493  
3494  static int clk_prepare_enable_get(void *data, u64 *val)
3495  {
3496  	struct clk_core *core = data;
3497  
3498  	*val = core->enable_count && core->prepare_count;
3499  	return 0;
3500  }
3501  
3502  DEFINE_DEBUGFS_ATTRIBUTE(clk_prepare_enable_fops, clk_prepare_enable_get,
3503  			 clk_prepare_enable_set, "%llu\n");
3504  
3505  #else
3506  #define clk_rate_set	NULL
3507  #define clk_rate_mode	0444
3508  
3509  #define clk_phase_set	NULL
3510  #define clk_phase_mode	0444
3511  #endif
3512  
3513  static int clk_rate_get(void *data, u64 *val)
3514  {
3515  	struct clk_core *core = data;
3516  
3517  	clk_prepare_lock();
3518  	*val = clk_core_get_rate_recalc(core);
3519  	clk_prepare_unlock();
3520  
3521  	return 0;
3522  }
3523  
3524  DEFINE_DEBUGFS_ATTRIBUTE(clk_rate_fops, clk_rate_get, clk_rate_set, "%llu\n");
3525  
3526  static int clk_phase_get(void *data, u64 *val)
3527  {
3528  	struct clk_core *core = data;
3529  
3530  	*val = core->phase;
3531  	return 0;
3532  }
3533  
3534  DEFINE_DEBUGFS_ATTRIBUTE(clk_phase_fops, clk_phase_get, clk_phase_set, "%llu\n");
3535  
3536  static const struct {
3537  	unsigned long flag;
3538  	const char *name;
3539  } clk_flags[] = {
3540  #define ENTRY(f) { f, #f }
3541  	ENTRY(CLK_SET_RATE_GATE),
3542  	ENTRY(CLK_SET_PARENT_GATE),
3543  	ENTRY(CLK_SET_RATE_PARENT),
3544  	ENTRY(CLK_IGNORE_UNUSED),
3545  	ENTRY(CLK_GET_RATE_NOCACHE),
3546  	ENTRY(CLK_SET_RATE_NO_REPARENT),
3547  	ENTRY(CLK_GET_ACCURACY_NOCACHE),
3548  	ENTRY(CLK_RECALC_NEW_RATES),
3549  	ENTRY(CLK_SET_RATE_UNGATE),
3550  	ENTRY(CLK_IS_CRITICAL),
3551  	ENTRY(CLK_OPS_PARENT_ENABLE),
3552  	ENTRY(CLK_DUTY_CYCLE_PARENT),
3553  #undef ENTRY
3554  };
3555  
3556  static int clk_flags_show(struct seq_file *s, void *data)
3557  {
3558  	struct clk_core *core = s->private;
3559  	unsigned long flags = core->flags;
3560  	unsigned int i;
3561  
3562  	for (i = 0; flags && i < ARRAY_SIZE(clk_flags); i++) {
3563  		if (flags & clk_flags[i].flag) {
3564  			seq_printf(s, "%s\n", clk_flags[i].name);
3565  			flags &= ~clk_flags[i].flag;
3566  		}
3567  	}
3568  	if (flags) {
3569  		/* Unknown flags */
3570  		seq_printf(s, "0x%lx\n", flags);
3571  	}
3572  
3573  	return 0;
3574  }
3575  DEFINE_SHOW_ATTRIBUTE(clk_flags);
3576  
3577  static void possible_parent_show(struct seq_file *s, struct clk_core *core,
3578  				 unsigned int i, char terminator)
3579  {
3580  	struct clk_core *parent;
3581  	const char *name = NULL;
3582  
3583  	/*
3584  	 * Go through the following options to fetch a parent's name.
3585  	 *
3586  	 * 1. Fetch the registered parent clock and use its name
3587  	 * 2. Use the global (fallback) name if specified
3588  	 * 3. Use the local fw_name if provided
3589  	 * 4. Fetch parent clock's clock-output-name if DT index was set
3590  	 *
3591  	 * This may still fail in some cases, such as when the parent is
3592  	 * specified directly via a struct clk_hw pointer, but it isn't
3593  	 * registered (yet).
3594  	 */
3595  	parent = clk_core_get_parent_by_index(core, i);
3596  	if (parent) {
3597  		seq_puts(s, parent->name);
3598  	} else if (core->parents[i].name) {
3599  		seq_puts(s, core->parents[i].name);
3600  	} else if (core->parents[i].fw_name) {
3601  		seq_printf(s, "<%s>(fw)", core->parents[i].fw_name);
3602  	} else {
3603  		if (core->parents[i].index >= 0)
3604  			name = of_clk_get_parent_name(core->of_node, core->parents[i].index);
3605  		if (!name)
3606  			name = "(missing)";
3607  
3608  		seq_puts(s, name);
3609  	}
3610  
3611  	seq_putc(s, terminator);
3612  }
3613  
3614  static int possible_parents_show(struct seq_file *s, void *data)
3615  {
3616  	struct clk_core *core = s->private;
3617  	int i;
3618  
3619  	for (i = 0; i < core->num_parents - 1; i++)
3620  		possible_parent_show(s, core, i, ' ');
3621  
3622  	possible_parent_show(s, core, i, '\n');
3623  
3624  	return 0;
3625  }
3626  DEFINE_SHOW_ATTRIBUTE(possible_parents);
3627  
3628  static int current_parent_show(struct seq_file *s, void *data)
3629  {
3630  	struct clk_core *core = s->private;
3631  
3632  	if (core->parent)
3633  		seq_printf(s, "%s\n", core->parent->name);
3634  
3635  	return 0;
3636  }
3637  DEFINE_SHOW_ATTRIBUTE(current_parent);
3638  
3639  #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3640  static ssize_t current_parent_write(struct file *file, const char __user *ubuf,
3641  				    size_t count, loff_t *ppos)
3642  {
3643  	struct seq_file *s = file->private_data;
3644  	struct clk_core *core = s->private;
3645  	struct clk_core *parent;
3646  	u8 idx;
3647  	int err;
3648  
3649  	err = kstrtou8_from_user(ubuf, count, 0, &idx);
3650  	if (err < 0)
3651  		return err;
3652  
3653  	parent = clk_core_get_parent_by_index(core, idx);
3654  	if (!parent)
3655  		return -ENOENT;
3656  
3657  	clk_prepare_lock();
3658  	err = clk_core_set_parent_nolock(core, parent);
3659  	clk_prepare_unlock();
3660  	if (err)
3661  		return err;
3662  
3663  	return count;
3664  }
3665  
3666  static const struct file_operations current_parent_rw_fops = {
3667  	.open		= current_parent_open,
3668  	.write		= current_parent_write,
3669  	.read		= seq_read,
3670  	.llseek		= seq_lseek,
3671  	.release	= single_release,
3672  };
3673  #endif
3674  
3675  static int clk_duty_cycle_show(struct seq_file *s, void *data)
3676  {
3677  	struct clk_core *core = s->private;
3678  	struct clk_duty *duty = &core->duty;
3679  
3680  	seq_printf(s, "%u/%u\n", duty->num, duty->den);
3681  
3682  	return 0;
3683  }
3684  DEFINE_SHOW_ATTRIBUTE(clk_duty_cycle);
3685  
3686  static int clk_min_rate_show(struct seq_file *s, void *data)
3687  {
3688  	struct clk_core *core = s->private;
3689  	unsigned long min_rate, max_rate;
3690  
3691  	clk_prepare_lock();
3692  	clk_core_get_boundaries(core, &min_rate, &max_rate);
3693  	clk_prepare_unlock();
3694  	seq_printf(s, "%lu\n", min_rate);
3695  
3696  	return 0;
3697  }
3698  DEFINE_SHOW_ATTRIBUTE(clk_min_rate);
3699  
3700  static int clk_max_rate_show(struct seq_file *s, void *data)
3701  {
3702  	struct clk_core *core = s->private;
3703  	unsigned long min_rate, max_rate;
3704  
3705  	clk_prepare_lock();
3706  	clk_core_get_boundaries(core, &min_rate, &max_rate);
3707  	clk_prepare_unlock();
3708  	seq_printf(s, "%lu\n", max_rate);
3709  
3710  	return 0;
3711  }
3712  DEFINE_SHOW_ATTRIBUTE(clk_max_rate);
3713  
3714  static void clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
3715  {
3716  	struct dentry *root;
3717  
3718  	if (!core || !pdentry)
3719  		return;
3720  
3721  	root = debugfs_create_dir(core->name, pdentry);
3722  	core->dentry = root;
3723  
3724  	debugfs_create_file("clk_rate", clk_rate_mode, root, core,
3725  			    &clk_rate_fops);
3726  	debugfs_create_file("clk_min_rate", 0444, root, core, &clk_min_rate_fops);
3727  	debugfs_create_file("clk_max_rate", 0444, root, core, &clk_max_rate_fops);
3728  	debugfs_create_ulong("clk_accuracy", 0444, root, &core->accuracy);
3729  	debugfs_create_file("clk_phase", clk_phase_mode, root, core,
3730  			    &clk_phase_fops);
3731  	debugfs_create_file("clk_flags", 0444, root, core, &clk_flags_fops);
3732  	debugfs_create_u32("clk_prepare_count", 0444, root, &core->prepare_count);
3733  	debugfs_create_u32("clk_enable_count", 0444, root, &core->enable_count);
3734  	debugfs_create_u32("clk_protect_count", 0444, root, &core->protect_count);
3735  	debugfs_create_u32("clk_notifier_count", 0444, root, &core->notifier_count);
3736  	debugfs_create_file("clk_duty_cycle", 0444, root, core,
3737  			    &clk_duty_cycle_fops);
3738  #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3739  	debugfs_create_file("clk_prepare_enable", 0644, root, core,
3740  			    &clk_prepare_enable_fops);
3741  
3742  	if (core->num_parents > 1)
3743  		debugfs_create_file("clk_parent", 0644, root, core,
3744  				    &current_parent_rw_fops);
3745  	else
3746  #endif
3747  	if (core->num_parents > 0)
3748  		debugfs_create_file("clk_parent", 0444, root, core,
3749  				    &current_parent_fops);
3750  
3751  	if (core->num_parents > 1)
3752  		debugfs_create_file("clk_possible_parents", 0444, root, core,
3753  				    &possible_parents_fops);
3754  
3755  	if (core->ops->debug_init)
3756  		core->ops->debug_init(core->hw, core->dentry);
3757  }
3758  
3759  /**
3760   * clk_debug_register - add a clk node to the debugfs clk directory
3761   * @core: the clk being added to the debugfs clk directory
3762   *
3763   * Dynamically adds a clk to the debugfs clk directory if debugfs has been
3764   * initialized.  Otherwise it bails out early since the debugfs clk directory
3765   * will be created lazily by clk_debug_init as part of a late_initcall.
3766   */
3767  static void clk_debug_register(struct clk_core *core)
3768  {
3769  	mutex_lock(&clk_debug_lock);
3770  	hlist_add_head(&core->debug_node, &clk_debug_list);
3771  	if (inited)
3772  		clk_debug_create_one(core, rootdir);
3773  	mutex_unlock(&clk_debug_lock);
3774  }
3775  
3776  /**
3777   * clk_debug_unregister - remove a clk node from the debugfs clk directory
3778   * @core: the clk being removed from the debugfs clk directory
3779   *
3780   * Dynamically removes a clk and all its child nodes from the
3781   * debugfs clk directory if clk->dentry points to debugfs created by
3782   * clk_debug_register in __clk_core_init.
3783   */
3784  static void clk_debug_unregister(struct clk_core *core)
3785  {
3786  	mutex_lock(&clk_debug_lock);
3787  	hlist_del_init(&core->debug_node);
3788  	debugfs_remove_recursive(core->dentry);
3789  	core->dentry = NULL;
3790  	mutex_unlock(&clk_debug_lock);
3791  }
3792  
3793  /**
3794   * clk_debug_init - lazily populate the debugfs clk directory
3795   *
3796   * clks are often initialized very early during boot before memory can be
3797   * dynamically allocated and well before debugfs is set up. This function
3798   * populates the debugfs clk directory once at boot-time when we know that
3799   * debugfs is set up. It should only be called once at boot-time; all other
3800   * clks added dynamically will be registered with clk_debug_register.
3801   */
3802  static int __init clk_debug_init(void)
3803  {
3804  	struct clk_core *core;
3805  
3806  #ifdef CLOCK_ALLOW_WRITE_DEBUGFS
3807  	pr_warn("\n");
3808  	pr_warn("********************************************************************\n");
3809  	pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
3810  	pr_warn("**                                                                **\n");
3811  	pr_warn("**  WRITEABLE clk DebugFS SUPPORT HAS BEEN ENABLED IN THIS KERNEL **\n");
3812  	pr_warn("**                                                                **\n");
3813  	pr_warn("** This means that this kernel is built to expose clk operations  **\n");
3814  	pr_warn("** such as parent or rate setting, enabling, disabling, etc.      **\n");
3815  	pr_warn("** to userspace, which may compromise security on your system.    **\n");
3816  	pr_warn("**                                                                **\n");
3817  	pr_warn("** If you see this message and you are not debugging the          **\n");
3818  	pr_warn("** kernel, report this immediately to your vendor!                **\n");
3819  	pr_warn("**                                                                **\n");
3820  	pr_warn("**     NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE NOTICE           **\n");
3821  	pr_warn("********************************************************************\n");
3822  #endif
3823  
3824  	rootdir = debugfs_create_dir("clk", NULL);
3825  
3826  	debugfs_create_file("clk_summary", 0444, rootdir, &all_lists,
3827  			    &clk_summary_fops);
3828  	debugfs_create_file("clk_dump", 0444, rootdir, &all_lists,
3829  			    &clk_dump_fops);
3830  	debugfs_create_file("clk_orphan_summary", 0444, rootdir, &orphan_list,
3831  			    &clk_summary_fops);
3832  	debugfs_create_file("clk_orphan_dump", 0444, rootdir, &orphan_list,
3833  			    &clk_dump_fops);
3834  
3835  	mutex_lock(&clk_debug_lock);
3836  	hlist_for_each_entry(core, &clk_debug_list, debug_node)
3837  		clk_debug_create_one(core, rootdir);
3838  
3839  	inited = 1;
3840  	mutex_unlock(&clk_debug_lock);
3841  
3842  	return 0;
3843  }
3844  late_initcall(clk_debug_init);
3845  #else
3846  static inline void clk_debug_register(struct clk_core *core) { }
3847  static inline void clk_debug_unregister(struct clk_core *core)
3848  {
3849  }
3850  #endif
3851  
3852  static void clk_core_reparent_orphans_nolock(void)
3853  {
3854  	struct clk_core *orphan;
3855  	struct hlist_node *tmp2;
3856  
3857  	/*
3858  	 * Walk the list of orphan clocks and reparent any that have newly
3859  	 * found a parent.
3860  	 */
3861  	hlist_for_each_entry_safe(orphan, tmp2, &clk_orphan_list, child_node) {
3862  		struct clk_core *parent = __clk_init_parent(orphan);
3863  
3864  		/*
3865  		 * We need to use __clk_set_parent_before() and _after() to
3866  		 * properly migrate any prepare/enable count of the orphan
3867  		 * clock. This is important for CLK_IS_CRITICAL clocks, which
3868  		 * are enabled during init but might not have a parent yet.
3869  		 */
3870  		if (parent) {
3871  			/* update the clk tree topology */
3872  			__clk_set_parent_before(orphan, parent);
3873  			__clk_set_parent_after(orphan, parent, NULL);
3874  			__clk_recalc_accuracies(orphan);
3875  			__clk_recalc_rates(orphan, true, 0);
3876  
3877  			/*
3878  			 * __clk_init_parent() will set the initial req_rate to
3879  			 * 0 if the clock doesn't have clk_ops::recalc_rate and
3880  			 * is an orphan when it's registered.
3881  			 *
3882  			 * 'req_rate' is used by clk_set_rate_range() and
3883  			 * clk_put() to trigger a clk_set_rate() call whenever
3884  			 * the boundaries are modified. Let's make sure
3885  			 * 'req_rate' is set to something non-zero so that
3886  			 * clk_set_rate_range() doesn't drop the frequency.
3887  			 */
3888  			orphan->req_rate = orphan->rate;
3889  		}
3890  	}
3891  }
3892  
3893  /**
3894   * __clk_core_init - initialize the data structures in a struct clk_core
3895   * @core:	clk_core being initialized
3896   *
3897   * Initializes the lists in struct clk_core, queries the hardware for the
3898   * parent and rate and sets them both.
3899   */
3900  static int __clk_core_init(struct clk_core *core)
3901  {
3902  	int ret;
3903  	struct clk_core *parent;
3904  	unsigned long rate;
3905  	int phase;
3906  
3907  	clk_prepare_lock();
3908  
3909  	/*
3910  	 * Set hw->core after grabbing the prepare_lock to synchronize with
3911  	 * callers of clk_core_fill_parent_index() where we treat hw->core
3912  	 * being NULL as the clk not being registered yet. This is crucial so
3913  	 * that clks aren't parented until their parent is fully registered.
3914  	 */
3915  	core->hw->core = core;
3916  
3917  	ret = clk_pm_runtime_get(core);
3918  	if (ret)
3919  		goto unlock;
3920  
3921  	/* check to see if a clock with this name is already registered */
3922  	if (clk_core_lookup(core->name)) {
3923  		pr_debug("%s: clk %s already initialized\n",
3924  				__func__, core->name);
3925  		ret = -EEXIST;
3926  		goto out;
3927  	}
3928  
3929  	/* check that clk_ops are sane.  See Documentation/driver-api/clk.rst */
3930  	if (core->ops->set_rate &&
3931  	    !((core->ops->round_rate || core->ops->determine_rate) &&
3932  	      core->ops->recalc_rate)) {
3933  		pr_err("%s: %s must implement .round_rate or .determine_rate in addition to .recalc_rate\n",
3934  		       __func__, core->name);
3935  		ret = -EINVAL;
3936  		goto out;
3937  	}
3938  
3939  	if (core->ops->set_parent && !core->ops->get_parent) {
3940  		pr_err("%s: %s must implement .get_parent & .set_parent\n",
3941  		       __func__, core->name);
3942  		ret = -EINVAL;
3943  		goto out;
3944  	}
3945  
3946  	if (core->ops->set_parent && !core->ops->determine_rate) {
3947  		pr_err("%s: %s must implement .set_parent & .determine_rate\n",
3948  			__func__, core->name);
3949  		ret = -EINVAL;
3950  		goto out;
3951  	}
3952  
3953  	if (core->num_parents > 1 && !core->ops->get_parent) {
3954  		pr_err("%s: %s must implement .get_parent as it has multiple parents\n",
3955  		       __func__, core->name);
3956  		ret = -EINVAL;
3957  		goto out;
3958  	}
3959  
3960  	if (core->ops->set_rate_and_parent &&
3961  			!(core->ops->set_parent && core->ops->set_rate)) {
3962  		pr_err("%s: %s must implement .set_parent & .set_rate\n",
3963  				__func__, core->name);
3964  		ret = -EINVAL;
3965  		goto out;
3966  	}
3967  
3968  	/*
3969  	 * optional platform-specific magic
3970  	 *
3971  	 * The .init callback is not used by any of the basic clock types, but
3972  	 * exists for weird hardware that must perform initialization magic for
3973  	 * CCF to get an accurate view of the clock for any other callbacks. It
3974  	 * may also be used when the provider needs to perform dynamic
3975  	 * allocations. Such allocations must be freed in the terminate() callback.
3976  	 * This callback shall not be used to initialize the clock's parameters,
3977  	 * such as rate, parent, etc ...
3978  	 *
3979  	 * If it exists, this callback shall be called before any other callback
3980  	 * of the clock.
3981  	 */
3982  	if (core->ops->init) {
3983  		ret = core->ops->init(core->hw);
3984  		if (ret)
3985  			goto out;
3986  	}
3987  
3988  	parent = core->parent = __clk_init_parent(core);
3989  
3990  	/*
3991  	 * Populate core->parent if parent has already been clk_core_init'd. If
3992  	 * parent has not yet been clk_core_init'd then place clk in the orphan
3993  	 * list.  If clk doesn't have any parents then place it in the root
3994  	 * clk list.
3995  	 *
3996  	 * Every time a new clk is clk_init'd then we walk the list of orphan
3997  	 * clocks and re-parent any that are children of the clock currently
3998  	 * being clk_init'd.
3999  	 */
4000  	if (parent) {
4001  		hlist_add_head(&core->child_node, &parent->children);
4002  		core->orphan = parent->orphan;
4003  	} else if (!core->num_parents) {
4004  		hlist_add_head(&core->child_node, &clk_root_list);
4005  		core->orphan = false;
4006  	} else {
4007  		hlist_add_head(&core->child_node, &clk_orphan_list);
4008  		core->orphan = true;
4009  	}
4010  
4011  	/*
4012  	 * Set clk's accuracy.  The preferred method is to use
4013  	 * .recalc_accuracy. For simple clocks and lazy developers the default
4014  	 * fallback is to use the parent's accuracy.  If a clock doesn't have a
4015  	 * parent (or is orphaned) then accuracy is set to zero (perfect
4016  	 * clock).
4017  	 */
4018  	if (core->ops->recalc_accuracy)
4019  		core->accuracy = core->ops->recalc_accuracy(core->hw,
4020  					clk_core_get_accuracy_no_lock(parent));
4021  	else if (parent)
4022  		core->accuracy = parent->accuracy;
4023  	else
4024  		core->accuracy = 0;
4025  
4026  	/*
4027  	 * Set clk's phase by clk_core_get_phase() caching the phase.
4028  	 * Since a phase is by definition relative to its parent, just
4029  	 * query the current clock phase, or just assume it's in phase.
4030  	 */
4031  	phase = clk_core_get_phase(core);
4032  	if (phase < 0) {
4033  		ret = phase;
4034  		pr_warn("%s: Failed to get phase for clk '%s'\n", __func__,
4035  			core->name);
4036  		goto out;
4037  	}
4038  
4039  	/*
4040  	 * Set clk's duty cycle.
4041  	 */
4042  	clk_core_update_duty_cycle_nolock(core);
4043  
4044  	/*
4045  	 * Set clk's rate.  The preferred method is to use .recalc_rate.  For
4046  	 * simple clocks and lazy developers the default fallback is to use the
4047  	 * parent's rate.  If a clock doesn't have a parent (or is orphaned)
4048  	 * then rate is set to zero.
4049  	 */
4050  	if (core->ops->recalc_rate)
4051  		rate = core->ops->recalc_rate(core->hw,
4052  				clk_core_get_rate_nolock(parent));
4053  	else if (parent)
4054  		rate = parent->rate;
4055  	else
4056  		rate = 0;
4057  	core->rate = core->req_rate = rate;
4058  
4059  	/*
4060  	 * Enable CLK_IS_CRITICAL clocks so newly added critical clocks
4061  	 * don't get accidentally disabled when walking the orphan tree and
4062  	 * reparenting clocks
4063  	 */
4064  	if (core->flags & CLK_IS_CRITICAL) {
4065  		ret = clk_core_prepare(core);
4066  		if (ret) {
4067  			pr_warn("%s: critical clk '%s' failed to prepare\n",
4068  			       __func__, core->name);
4069  			goto out;
4070  		}
4071  
4072  		ret = clk_core_enable_lock(core);
4073  		if (ret) {
4074  			pr_warn("%s: critical clk '%s' failed to enable\n",
4075  			       __func__, core->name);
4076  			clk_core_unprepare(core);
4077  			goto out;
4078  		}
4079  	}
4080  
4081  	clk_core_reparent_orphans_nolock();
4082  out:
4083  	clk_pm_runtime_put(core);
4084  unlock:
4085  	if (ret) {
4086  		hlist_del_init(&core->child_node);
4087  		core->hw->core = NULL;
4088  	}
4089  
4090  	clk_prepare_unlock();
4091  
4092  	if (!ret)
4093  		clk_debug_register(core);
4094  
4095  	return ret;
4096  }
4097  
4098  /**
4099   * clk_core_link_consumer - Add a clk consumer to the list of consumers in a clk_core
4100   * @core: clk to add consumer to
4101   * @clk: consumer to link to a clk
4102   */
4103  static void clk_core_link_consumer(struct clk_core *core, struct clk *clk)
4104  {
4105  	clk_prepare_lock();
4106  	hlist_add_head(&clk->clks_node, &core->clks);
4107  	clk_prepare_unlock();
4108  }
4109  
4110  /**
4111   * clk_core_unlink_consumer - Remove a clk consumer from the list of consumers in a clk_core
4112   * @clk: consumer to unlink
4113   */
4114  static void clk_core_unlink_consumer(struct clk *clk)
4115  {
4116  	lockdep_assert_held(&prepare_lock);
4117  	hlist_del(&clk->clks_node);
4118  }
4119  
4120  /**
4121   * alloc_clk - Allocate a clk consumer, but leave it unlinked from the clk_core
4122   * @core: clk to allocate a consumer for
4123   * @dev_id: string describing device name
4124   * @con_id: connection ID string on device
4125   *
4126   * Returns: clk consumer left unlinked from the consumer list
4127   */
4128  static struct clk *alloc_clk(struct clk_core *core, const char *dev_id,
4129  			     const char *con_id)
4130  {
4131  	struct clk *clk;
4132  
4133  	clk = kzalloc(sizeof(*clk), GFP_KERNEL);
4134  	if (!clk)
4135  		return ERR_PTR(-ENOMEM);
4136  
4137  	clk->core = core;
4138  	clk->dev_id = dev_id;
4139  	clk->con_id = kstrdup_const(con_id, GFP_KERNEL);
4140  	clk->max_rate = ULONG_MAX;
4141  
4142  	return clk;
4143  }
4144  
4145  /**
4146   * free_clk - Free a clk consumer
4147   * @clk: clk consumer to free
4148   *
4149   * Note, this assumes the clk has been unlinked from the clk_core consumer
4150   * list.
4151   */
4152  static void free_clk(struct clk *clk)
4153  {
4154  	kfree_const(clk->con_id);
4155  	kfree(clk);
4156  }
4157  
4158  /**
4159   * clk_hw_create_clk - Allocate and link a clk consumer to a clk_core given
4160   * a clk_hw
4161   * @dev: clk consumer device
4162   * @hw: clk_hw associated with the clk being consumed
4163   * @dev_id: string describing device name
4164   * @con_id: connection ID string on device
4165   *
4166   * This is the main function used to create a clk pointer for use by clk
4167   * consumers. It connects a consumer to the clk_core and clk_hw structures
4168   * used by the framework and clk provider respectively.
4169   */
4170  struct clk *clk_hw_create_clk(struct device *dev, struct clk_hw *hw,
4171  			      const char *dev_id, const char *con_id)
4172  {
4173  	struct clk *clk;
4174  	struct clk_core *core;
4175  
4176  	/* This is to allow this function to be chained to others */
4177  	if (IS_ERR_OR_NULL(hw))
4178  		return ERR_CAST(hw);
4179  
4180  	core = hw->core;
4181  	clk = alloc_clk(core, dev_id, con_id);
4182  	if (IS_ERR(clk))
4183  		return clk;
4184  	clk->dev = dev;
4185  
4186  	if (!try_module_get(core->owner)) {
4187  		free_clk(clk);
4188  		return ERR_PTR(-ENOENT);
4189  	}
4190  
4191  	kref_get(&core->ref);
4192  	clk_core_link_consumer(core, clk);
4193  
4194  	return clk;
4195  }
4196  
4197  /**
4198   * clk_hw_get_clk - get clk consumer given a clk_hw
4199   * @hw: clk_hw associated with the clk being consumed
4200   * @con_id: connection ID string on device
4201   *
4202   * Returns: new clk consumer
4203   * This is the function to be used by providers which need to get a
4204   * consumer clk and act on the clock element.
4205   * Calls to this function must be balanced with calls to clk_put().
4206   */
4207  struct clk *clk_hw_get_clk(struct clk_hw *hw, const char *con_id)
4208  {
4209  	struct device *dev = hw->core->dev;
4210  	const char *name = dev ? dev_name(dev) : NULL;
4211  
4212  	return clk_hw_create_clk(dev, hw, name, con_id);
4213  }
4214  EXPORT_SYMBOL(clk_hw_get_clk);
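
/*
 * Example (illustrative sketch only, not part of the framework): a provider
 * acting on one of its own clocks through the consumer API. The foo_* names
 * and the target rate are hypothetical, and error handling is reduced to the
 * essentials. The clk_put() balances the clk_hw_get_clk() above.
 *
 *	static int foo_sync_rate(struct clk_hw *hw, unsigned long rate)
 *	{
 *		struct clk *clk;
 *		int ret;
 *
 *		clk = clk_hw_get_clk(hw, NULL);
 *		if (IS_ERR(clk))
 *			return PTR_ERR(clk);
 *
 *		ret = clk_set_rate(clk, rate);
 *		clk_put(clk);
 *
 *		return ret;
 *	}
 */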
4215  
4216  static int clk_cpy_name(const char **dst_p, const char *src, bool must_exist)
4217  {
4218  	const char *dst;
4219  
4220  	if (!src) {
4221  		if (must_exist)
4222  			return -EINVAL;
4223  		return 0;
4224  	}
4225  
4226  	*dst_p = dst = kstrdup_const(src, GFP_KERNEL);
4227  	if (!dst)
4228  		return -ENOMEM;
4229  
4230  	return 0;
4231  }
4232  
4233  static int clk_core_populate_parent_map(struct clk_core *core,
4234  					const struct clk_init_data *init)
4235  {
4236  	u8 num_parents = init->num_parents;
4237  	const char * const *parent_names = init->parent_names;
4238  	const struct clk_hw **parent_hws = init->parent_hws;
4239  	const struct clk_parent_data *parent_data = init->parent_data;
4240  	int i, ret = 0;
4241  	struct clk_parent_map *parents, *parent;
4242  
4243  	if (!num_parents)
4244  		return 0;
4245  
4246  	/*
4247  	 * Avoid unnecessary string look-ups of clk_core's possible parents by
4248  	 * having a cache of names/clk_hw pointers to clk_core pointers.
4249  	 */
4250  	parents = kcalloc(num_parents, sizeof(*parents), GFP_KERNEL);
4251  	core->parents = parents;
4252  	if (!parents)
4253  		return -ENOMEM;
4254  
4255  	/* Copy everything over because it might be __initdata */
4256  	for (i = 0, parent = parents; i < num_parents; i++, parent++) {
4257  		parent->index = -1;
4258  		if (parent_names) {
4259  			/* throw a WARN if any entries are NULL */
4260  			WARN(!parent_names[i],
4261  				"%s: invalid NULL in %s's .parent_names\n",
4262  				__func__, core->name);
4263  			ret = clk_cpy_name(&parent->name, parent_names[i],
4264  					   true);
4265  		} else if (parent_data) {
4266  			parent->hw = parent_data[i].hw;
4267  			parent->index = parent_data[i].index;
4268  			ret = clk_cpy_name(&parent->fw_name,
4269  					   parent_data[i].fw_name, false);
4270  			if (!ret)
4271  				ret = clk_cpy_name(&parent->name,
4272  						   parent_data[i].name,
4273  						   false);
4274  		} else if (parent_hws) {
4275  			parent->hw = parent_hws[i];
4276  		} else {
4277  			ret = -EINVAL;
4278  			WARN(1, "Must specify parents if num_parents > 0\n");
4279  		}
4280  
4281  		if (ret) {
4282  			do {
4283  				kfree_const(parents[i].name);
4284  				kfree_const(parents[i].fw_name);
4285  			} while (--i >= 0);
4286  			kfree(parents);
4287  
4288  			return ret;
4289  		}
4290  	}
4291  
4292  	return 0;
4293  }
4294  
4295  static void clk_core_free_parent_map(struct clk_core *core)
4296  {
4297  	int i = core->num_parents;
4298  
4299  	if (!core->num_parents)
4300  		return;
4301  
4302  	while (--i >= 0) {
4303  		kfree_const(core->parents[i].name);
4304  		kfree_const(core->parents[i].fw_name);
4305  	}
4306  
4307  	kfree(core->parents);
4308  }
4309  
4310  /* Free memory allocated for a struct clk_core */
4311  static void __clk_release(struct kref *ref)
4312  {
4313  	struct clk_core *core = container_of(ref, struct clk_core, ref);
4314  
4315  	if (core->rpm_enabled) {
4316  		mutex_lock(&clk_rpm_list_lock);
4317  		hlist_del(&core->rpm_node);
4318  		mutex_unlock(&clk_rpm_list_lock);
4319  	}
4320  
4321  	clk_core_free_parent_map(core);
4322  	kfree_const(core->name);
4323  	kfree(core);
4324  }
4325  
4326  static struct clk *
4327  __clk_register(struct device *dev, struct device_node *np, struct clk_hw *hw)
4328  {
4329  	int ret;
4330  	struct clk_core *core;
4331  	const struct clk_init_data *init = hw->init;
4332  
4333  	/*
4334  	 * The init data is not supposed to be used outside of the registration path.
4335  	 * Set it to NULL so that provider drivers can't use it either and so that
4336  	 * we catch use of hw->init early on in the core.
4337  	 */
4338  	hw->init = NULL;
4339  
4340  	core = kzalloc(sizeof(*core), GFP_KERNEL);
4341  	if (!core) {
4342  		ret = -ENOMEM;
4343  		goto fail_out;
4344  	}
4345  
4346  	kref_init(&core->ref);
4347  
4348  	core->name = kstrdup_const(init->name, GFP_KERNEL);
4349  	if (!core->name) {
4350  		ret = -ENOMEM;
4351  		goto fail_name;
4352  	}
4353  
4354  	if (WARN_ON(!init->ops)) {
4355  		ret = -EINVAL;
4356  		goto fail_ops;
4357  	}
4358  	core->ops = init->ops;
4359  
4360  	core->dev = dev;
4361  	clk_pm_runtime_init(core);
4362  	core->of_node = np;
4363  	if (dev && dev->driver)
4364  		core->owner = dev->driver->owner;
4365  	core->hw = hw;
4366  	core->flags = init->flags;
4367  	core->num_parents = init->num_parents;
4368  	core->min_rate = 0;
4369  	core->max_rate = ULONG_MAX;
4370  
4371  	ret = clk_core_populate_parent_map(core, init);
4372  	if (ret)
4373  		goto fail_parents;
4374  
4375  	INIT_HLIST_HEAD(&core->clks);
4376  
4377  	/*
4378  	 * Don't call clk_hw_create_clk() here because that would pin the
4379  	 * provider module to itself and prevent it from ever being removed.
4380  	 */
4381  	hw->clk = alloc_clk(core, NULL, NULL);
4382  	if (IS_ERR(hw->clk)) {
4383  		ret = PTR_ERR(hw->clk);
4384  		goto fail_create_clk;
4385  	}
4386  
4387  	clk_core_link_consumer(core, hw->clk);
4388  
4389  	ret = __clk_core_init(core);
4390  	if (!ret)
4391  		return hw->clk;
4392  
4393  	clk_prepare_lock();
4394  	clk_core_unlink_consumer(hw->clk);
4395  	clk_prepare_unlock();
4396  
4397  	free_clk(hw->clk);
4398  	hw->clk = NULL;
4399  
4400  fail_create_clk:
4401  fail_parents:
4402  fail_ops:
4403  fail_name:
4404  	kref_put(&core->ref, __clk_release);
4405  fail_out:
4406  	return ERR_PTR(ret);
4407  }
4408  
4409  /**
4410   * dev_or_parent_of_node() - Get device node of @dev or @dev's parent
4411   * @dev: Device to get device node of
4412   *
4413   * Return: device node pointer of @dev, or the device node pointer of
4414   * @dev->parent if @dev doesn't have a device node, or NULL if neither
4415   * @dev nor @dev->parent has a device node.
4416   */
4417  static struct device_node *dev_or_parent_of_node(struct device *dev)
4418  {
4419  	struct device_node *np;
4420  
4421  	if (!dev)
4422  		return NULL;
4423  
4424  	np = dev_of_node(dev);
4425  	if (!np)
4426  		np = dev_of_node(dev->parent);
4427  
4428  	return np;
4429  }
4430  
4431  /**
4432   * clk_register - allocate a new clock, register it and return an opaque cookie
4433   * @dev: device that is registering this clock
4434   * @hw: link to hardware-specific clock data
4435   *
4436   * clk_register is the *deprecated* interface for populating the clock tree with
4437   * new clock nodes. Use clk_hw_register() instead.
4438   *
4439   * Returns: a pointer to the newly allocated struct clk which
4440   * cannot be dereferenced by driver code but may be used in conjunction with the
4441   * rest of the clock API.  In the event of an error clk_register will return an
4442   * error code; drivers must test for an error code after calling clk_register.
4443   */
4444  struct clk *clk_register(struct device *dev, struct clk_hw *hw)
4445  {
4446  	return __clk_register(dev, dev_or_parent_of_node(dev), hw);
4447  }
4448  EXPORT_SYMBOL_GPL(clk_register);
4449  
4450  /**
4451   * clk_hw_register - register a clk_hw and return an error code
4452   * @dev: device that is registering this clock
4453   * @hw: link to hardware-specific clock data
4454   *
4455   * clk_hw_register is the primary interface for populating the clock tree with
4456   * new clock nodes. It returns an integer equal to zero indicating success or
4457   * less than zero indicating failure. Drivers must test for an error code after
4458   * calling clk_hw_register().
4459   */
4460  int clk_hw_register(struct device *dev, struct clk_hw *hw)
4461  {
4462  	return PTR_ERR_OR_ZERO(__clk_register(dev, dev_or_parent_of_node(dev),
4463  			       hw));
4464  }
4465  EXPORT_SYMBOL_GPL(clk_hw_register);
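
/*
 * Example (illustrative sketch only): registering a clk_hw from a driver
 * probe function. The foo_* names, ops and surrounding driver are
 * hypothetical; real providers usually prefer devm_clk_hw_register() and,
 * where possible, one of the basic clock type helpers instead of open-coding
 * clk_ops.
 *
 *	static const struct clk_ops foo_clk_ops = {
 *		.recalc_rate	= foo_recalc_rate,
 *		.enable		= foo_enable,
 *		.disable	= foo_disable,
 *	};
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_clk *foo;
 *		struct clk_init_data init = { };
 *		int ret;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		init.name = "foo_clk";
 *		init.ops = &foo_clk_ops;
 *		init.num_parents = 0;
 *		foo->hw.init = &init;
 *
 *		ret = clk_hw_register(&pdev->dev, &foo->hw);
 *		if (ret)
 *			return ret;
 *
 *		return of_clk_add_hw_provider(pdev->dev.of_node,
 *					      of_clk_hw_simple_get, &foo->hw);
 *	}
 */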
4466  
4467  /*
4468   * of_clk_hw_register - register a clk_hw and return an error code
4469   * @node: device_node of device that is registering this clock
4470   * @hw: link to hardware-specific clock data
4471   *
4472   * of_clk_hw_register() is the primary interface for populating the clock tree
4473   * with new clock nodes when a struct device is not available, but a struct
4474   * device_node is. It returns an integer equal to zero indicating success or
4475   * less than zero indicating failure. Drivers must test for an error code after
4476   * calling of_clk_hw_register().
4477   */
4478  int of_clk_hw_register(struct device_node *node, struct clk_hw *hw)
4479  {
4480  	return PTR_ERR_OR_ZERO(__clk_register(NULL, node, hw));
4481  }
4482  EXPORT_SYMBOL_GPL(of_clk_hw_register);
4483  
4484  /*
4485   * Empty clk_ops for unregistered clocks. These are used temporarily
4486   * after clk_unregister() was called on a clock and until the last clock
4487   * consumer calls clk_put() and the struct clk object is freed.
4488   */
4489  static int clk_nodrv_prepare_enable(struct clk_hw *hw)
4490  {
4491  	return -ENXIO;
4492  }
4493  
4494  static void clk_nodrv_disable_unprepare(struct clk_hw *hw)
4495  {
4496  	WARN_ON_ONCE(1);
4497  }
4498  
4499  static int clk_nodrv_set_rate(struct clk_hw *hw, unsigned long rate,
4500  					unsigned long parent_rate)
4501  {
4502  	return -ENXIO;
4503  }
4504  
4505  static int clk_nodrv_set_parent(struct clk_hw *hw, u8 index)
4506  {
4507  	return -ENXIO;
4508  }
4509  
4510  static int clk_nodrv_determine_rate(struct clk_hw *hw,
4511  				    struct clk_rate_request *req)
4512  {
4513  	return -ENXIO;
4514  }
4515  
4516  static const struct clk_ops clk_nodrv_ops = {
4517  	.enable		= clk_nodrv_prepare_enable,
4518  	.disable	= clk_nodrv_disable_unprepare,
4519  	.prepare	= clk_nodrv_prepare_enable,
4520  	.unprepare	= clk_nodrv_disable_unprepare,
4521  	.determine_rate	= clk_nodrv_determine_rate,
4522  	.set_rate	= clk_nodrv_set_rate,
4523  	.set_parent	= clk_nodrv_set_parent,
4524  };
4525  
4526  static void clk_core_evict_parent_cache_subtree(struct clk_core *root,
4527  						const struct clk_core *target)
4528  {
4529  	int i;
4530  	struct clk_core *child;
4531  
4532  	for (i = 0; i < root->num_parents; i++)
4533  		if (root->parents[i].core == target)
4534  			root->parents[i].core = NULL;
4535  
4536  	hlist_for_each_entry(child, &root->children, child_node)
4537  		clk_core_evict_parent_cache_subtree(child, target);
4538  }
4539  
4540  /* Remove this clk from all parent caches */
4541  static void clk_core_evict_parent_cache(struct clk_core *core)
4542  {
4543  	const struct hlist_head **lists;
4544  	struct clk_core *root;
4545  
4546  	lockdep_assert_held(&prepare_lock);
4547  
4548  	for (lists = all_lists; *lists; lists++)
4549  		hlist_for_each_entry(root, *lists, child_node)
4550  			clk_core_evict_parent_cache_subtree(root, core);
4551  
4552  }
4553  
4554  /**
4555   * clk_unregister - unregister a currently registered clock
4556   * @clk: clock to unregister
4557   */
4558  void clk_unregister(struct clk *clk)
4559  {
4560  	unsigned long flags;
4561  	const struct clk_ops *ops;
4562  
4563  	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4564  		return;
4565  
4566  	clk_debug_unregister(clk->core);
4567  
4568  	clk_prepare_lock();
4569  
4570  	ops = clk->core->ops;
4571  	if (ops == &clk_nodrv_ops) {
4572  		pr_err("%s: unregistered clock: %s\n", __func__,
4573  		       clk->core->name);
4574  		clk_prepare_unlock();
4575  		return;
4576  	}
4577  	/*
4578  	 * Assign empty clock ops for consumers that might still hold
4579  	 * a reference to this clock.
4580  	 */
4581  	flags = clk_enable_lock();
4582  	clk->core->ops = &clk_nodrv_ops;
4583  	clk_enable_unlock(flags);
4584  
4585  	if (ops->terminate)
4586  		ops->terminate(clk->core->hw);
4587  
4588  	if (!hlist_empty(&clk->core->children)) {
4589  		struct clk_core *child;
4590  		struct hlist_node *t;
4591  
4592  		/* Reparent all children to the orphan list. */
4593  		hlist_for_each_entry_safe(child, t, &clk->core->children,
4594  					  child_node)
4595  			clk_core_set_parent_nolock(child, NULL);
4596  	}
4597  
4598  	clk_core_evict_parent_cache(clk->core);
4599  
4600  	hlist_del_init(&clk->core->child_node);
4601  
4602  	if (clk->core->prepare_count)
4603  		pr_warn("%s: unregistering prepared clock: %s\n",
4604  					__func__, clk->core->name);
4605  
4606  	if (clk->core->protect_count)
4607  		pr_warn("%s: unregistering protected clock: %s\n",
4608  					__func__, clk->core->name);
4609  	clk_prepare_unlock();
4610  
4611  	kref_put(&clk->core->ref, __clk_release);
4612  	free_clk(clk);
4613  }
4614  EXPORT_SYMBOL_GPL(clk_unregister);
4615  
4616  /**
4617   * clk_hw_unregister - unregister a currently registered clk_hw
4618   * @hw: hardware-specific clock data to unregister
4619   */
4620  void clk_hw_unregister(struct clk_hw *hw)
4621  {
4622  	clk_unregister(hw->clk);
4623  }
4624  EXPORT_SYMBOL_GPL(clk_hw_unregister);
4625  
4626  static void devm_clk_unregister_cb(struct device *dev, void *res)
4627  {
4628  	clk_unregister(*(struct clk **)res);
4629  }
4630  
4631  static void devm_clk_hw_unregister_cb(struct device *dev, void *res)
4632  {
4633  	clk_hw_unregister(*(struct clk_hw **)res);
4634  }
4635  
4636  /**
4637   * devm_clk_register - resource managed clk_register()
4638   * @dev: device that is registering this clock
4639   * @hw: link to hardware-specific clock data
4640   *
4641   * Managed clk_register(). This function is *deprecated*, use devm_clk_hw_register() instead.
4642   *
4643   * Clocks returned from this function are automatically clk_unregister()ed on
4644   * driver detach. See clk_register() for more information.
4645   */
4646  struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
4647  {
4648  	struct clk *clk;
4649  	struct clk **clkp;
4650  
4651  	clkp = devres_alloc(devm_clk_unregister_cb, sizeof(*clkp), GFP_KERNEL);
4652  	if (!clkp)
4653  		return ERR_PTR(-ENOMEM);
4654  
4655  	clk = clk_register(dev, hw);
4656  	if (!IS_ERR(clk)) {
4657  		*clkp = clk;
4658  		devres_add(dev, clkp);
4659  	} else {
4660  		devres_free(clkp);
4661  	}
4662  
4663  	return clk;
4664  }
4665  EXPORT_SYMBOL_GPL(devm_clk_register);
4666  
4667  /**
4668   * devm_clk_hw_register - resource managed clk_hw_register()
4669   * @dev: device that is registering this clock
4670   * @hw: link to hardware-specific clock data
4671   *
4672   * Managed clk_hw_register(). Clocks registered by this function are
4673   * automatically clk_hw_unregister()ed on driver detach. See clk_hw_register()
4674   * for more information.
4675   */
4676  int devm_clk_hw_register(struct device *dev, struct clk_hw *hw)
4677  {
4678  	struct clk_hw **hwp;
4679  	int ret;
4680  
4681  	hwp = devres_alloc(devm_clk_hw_unregister_cb, sizeof(*hwp), GFP_KERNEL);
4682  	if (!hwp)
4683  		return -ENOMEM;
4684  
4685  	ret = clk_hw_register(dev, hw);
4686  	if (!ret) {
4687  		*hwp = hw;
4688  		devres_add(dev, hwp);
4689  	} else {
4690  		devres_free(hwp);
4691  	}
4692  
4693  	return ret;
4694  }
4695  EXPORT_SYMBOL_GPL(devm_clk_hw_register);
4696  
4697  static void devm_clk_release(struct device *dev, void *res)
4698  {
4699  	clk_put(*(struct clk **)res);
4700  }
4701  
4702  /**
4703   * devm_clk_hw_get_clk - resource managed clk_hw_get_clk()
4704   * @dev: device that is registering this clock
4705   * @hw: clk_hw associated with the clk being consumed
4706   * @con_id: connection ID string on device
4707   *
4708   * Managed clk_hw_get_clk(). Clocks got with this function are
4709   * automatically clk_put() on driver detach. See clk_put()
4710   * for more information.
4711   */
4712  struct clk *devm_clk_hw_get_clk(struct device *dev, struct clk_hw *hw,
4713  				const char *con_id)
4714  {
4715  	struct clk *clk;
4716  	struct clk **clkp;
4717  
4718  	/* This should not happen because it would mean we have drivers
4719  	 * passing around clk_hw pointers instead of having the caller use
4720  	 * proper clk_get() style APIs
4721  	 */
4722  	WARN_ON_ONCE(dev != hw->core->dev);
4723  
4724  	clkp = devres_alloc(devm_clk_release, sizeof(*clkp), GFP_KERNEL);
4725  	if (!clkp)
4726  		return ERR_PTR(-ENOMEM);
4727  
4728  	clk = clk_hw_get_clk(hw, con_id);
4729  	if (!IS_ERR(clk)) {
4730  		*clkp = clk;
4731  		devres_add(dev, clkp);
4732  	} else {
4733  		devres_free(clkp);
4734  	}
4735  
4736  	return clk;
4737  }
4738  EXPORT_SYMBOL_GPL(devm_clk_hw_get_clk);
4739  
4740  /*
4741   * clkdev helpers
4742   */
4743  
4744  void __clk_put(struct clk *clk)
4745  {
4746  	struct module *owner;
4747  
4748  	if (!clk || WARN_ON_ONCE(IS_ERR(clk)))
4749  		return;
4750  
4751  	clk_prepare_lock();
4752  
4753  	/*
4754  	 * Before calling clk_put, all calls to clk_rate_exclusive_get() from a
4755  	 * given user should have been balanced with calls to
4756  	 * clk_rate_exclusive_put() by that same consumer.
4757  	 */
4758  	if (WARN_ON(clk->exclusive_count)) {
4759  		/* We voiced our concern, let's sanitize the situation */
4760  		clk->core->protect_count -= (clk->exclusive_count - 1);
4761  		clk_core_rate_unprotect(clk->core);
4762  		clk->exclusive_count = 0;
4763  	}
4764  
4765  	clk_core_unlink_consumer(clk);
4766  
4767  	/* If we had any boundaries on that clock, let's drop them. */
4768  	if (clk->min_rate > 0 || clk->max_rate < ULONG_MAX)
4769  		clk_set_rate_range_nolock(clk, 0, ULONG_MAX);
4770  
4771  	clk_prepare_unlock();
4772  
4773  	owner = clk->core->owner;
4774  	kref_put(&clk->core->ref, __clk_release);
4775  	module_put(owner);
4776  	free_clk(clk);
4777  }
4778  
4779  /***        clk rate change notifiers        ***/
4780  
4781  /**
4782   * clk_notifier_register - add a clk rate change notifier
4783   * @clk: struct clk * to watch
4784   * @nb: struct notifier_block * with callback info
4785   *
4786   * Request notification when clk's rate changes.  This uses an SRCU
4787   * notifier because we want it to block and notifier unregistrations are
4788   * uncommon.  The callbacks associated with the notifier must not
4789   * re-enter the clk framework by calling any top-level clk APIs; doing so
4790   * would take the prepare_lock mutex recursively and deadlock.
4791   *
4792   * In all notification cases (pre, post and abort rate change) the original
4793   * clock rate is passed to the callback via struct clk_notifier_data.old_rate
4794   * and the new frequency is passed via struct clk_notifier_data.new_rate.
4795   *
4796   * clk_notifier_register() must be called from non-atomic context.
4797   * Returns -EINVAL if called with null arguments, -ENOMEM upon
4798   * allocation failure; otherwise, passes along the return value of
4799   * srcu_notifier_chain_register().
4800   */
4801  int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
4802  {
4803  	struct clk_notifier *cn;
4804  	int ret = -ENOMEM;
4805  
4806  	if (!clk || !nb)
4807  		return -EINVAL;
4808  
4809  	clk_prepare_lock();
4810  
4811  	/* search the list of notifiers for this clk */
4812  	list_for_each_entry(cn, &clk_notifier_list, node)
4813  		if (cn->clk == clk)
4814  			goto found;
4815  
4816  	/* if clk wasn't in the notifier list, allocate new clk_notifier */
4817  	cn = kzalloc(sizeof(*cn), GFP_KERNEL);
4818  	if (!cn)
4819  		goto out;
4820  
4821  	cn->clk = clk;
4822  	srcu_init_notifier_head(&cn->notifier_head);
4823  
4824  	list_add(&cn->node, &clk_notifier_list);
4825  
4826  found:
4827  	ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
4828  
4829  	clk->core->notifier_count++;
4830  
4831  out:
4832  	clk_prepare_unlock();
4833  
4834  	return ret;
4835  }
4836  EXPORT_SYMBOL_GPL(clk_notifier_register);
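
/*
 * Example (illustrative sketch only): a rate change notifier callback and its
 * registration. The foo_* names and the 200 MHz limit are hypothetical.
 * Returning NOTIFY_BAD from a PRE_RATE_CHANGE notification vetoes the rate
 * change; the callback must not call back into top-level clk APIs.
 *
 *	static int foo_clk_notify(struct notifier_block *nb,
 *				  unsigned long event, void *data)
 *	{
 *		struct clk_notifier_data *ndata = data;
 *
 *		switch (event) {
 *		case PRE_RATE_CHANGE:
 *			if (ndata->new_rate > 200000000)
 *				return NOTIFY_BAD;
 *			return NOTIFY_OK;
 *		case POST_RATE_CHANGE:
 *		case ABORT_RATE_CHANGE:
 *		default:
 *			return NOTIFY_DONE;
 *		}
 *	}
 *
 *	static struct notifier_block foo_clk_nb = {
 *		.notifier_call = foo_clk_notify,
 *	};
 *
 *	ret = clk_notifier_register(clk, &foo_clk_nb);
 */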
4837  
4838  /**
4839   * clk_notifier_unregister - remove a clk rate change notifier
4840   * @clk: struct clk *
4841   * @nb: struct notifier_block * with callback info
4842   *
4843   * Requests no further notification of changes to 'clk' and frees the
4844   * memory allocated in clk_notifier_register().
4845   *
4846   * Returns -EINVAL if called with null arguments; otherwise, passes
4847   * along the return value of srcu_notifier_chain_unregister().
4848   */
4849  int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
4850  {
4851  	struct clk_notifier *cn;
4852  	int ret = -ENOENT;
4853  
4854  	if (!clk || !nb)
4855  		return -EINVAL;
4856  
4857  	clk_prepare_lock();
4858  
4859  	list_for_each_entry(cn, &clk_notifier_list, node) {
4860  		if (cn->clk == clk) {
4861  			ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
4862  
4863  			clk->core->notifier_count--;
4864  
4865  			/* XXX the notifier code should handle this better */
4866  			if (!cn->notifier_head.head) {
4867  				srcu_cleanup_notifier_head(&cn->notifier_head);
4868  				list_del(&cn->node);
4869  				kfree(cn);
4870  			}
4871  			break;
4872  		}
4873  	}
4874  
4875  	clk_prepare_unlock();
4876  
4877  	return ret;
4878  }
4879  EXPORT_SYMBOL_GPL(clk_notifier_unregister);
4880  
4881  struct clk_notifier_devres {
4882  	struct clk *clk;
4883  	struct notifier_block *nb;
4884  };
4885  
4886  static void devm_clk_notifier_release(struct device *dev, void *res)
4887  {
4888  	struct clk_notifier_devres *devres = res;
4889  
4890  	clk_notifier_unregister(devres->clk, devres->nb);
4891  }
4892  
4893  int devm_clk_notifier_register(struct device *dev, struct clk *clk,
4894  			       struct notifier_block *nb)
4895  {
4896  	struct clk_notifier_devres *devres;
4897  	int ret;
4898  
4899  	devres = devres_alloc(devm_clk_notifier_release,
4900  			      sizeof(*devres), GFP_KERNEL);
4901  
4902  	if (!devres)
4903  		return -ENOMEM;
4904  
4905  	ret = clk_notifier_register(clk, nb);
4906  	if (!ret) {
4907  		devres->clk = clk;
4908  		devres->nb = nb;
4909  		devres_add(dev, devres);
4910  	} else {
4911  		devres_free(devres);
4912  	}
4913  
4914  	return ret;
4915  }
4916  EXPORT_SYMBOL_GPL(devm_clk_notifier_register);
4917  
4918  #ifdef CONFIG_OF
4919  static void clk_core_reparent_orphans(void)
4920  {
4921  	clk_prepare_lock();
4922  	clk_core_reparent_orphans_nolock();
4923  	clk_prepare_unlock();
4924  }
4925  
4926  /**
4927   * struct of_clk_provider - Clock provider registration structure
4928   * @link: Entry in global list of clock providers
4929   * @node: Pointer to device tree node of clock provider
4930   * @get: Get clock callback.  Returns NULL or a struct clk for the
4931   *       given clock specifier
4932   * @get_hw: Get clk_hw callback.  Returns NULL, ERR_PTR or a
4933   *       struct clk_hw for the given clock specifier
4934   * @data: context pointer to be passed into @get callback
4935   */
4936  struct of_clk_provider {
4937  	struct list_head link;
4938  
4939  	struct device_node *node;
4940  	struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
4941  	struct clk_hw *(*get_hw)(struct of_phandle_args *clkspec, void *data);
4942  	void *data;
4943  };
4944  
4945  extern struct of_device_id __clk_of_table;
4946  static const struct of_device_id __clk_of_table_sentinel
4947  	__used __section("__clk_of_table_end");
4948  
4949  static LIST_HEAD(of_clk_providers);
4950  static DEFINE_MUTEX(of_clk_mutex);
4951  
4952  struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
4953  				     void *data)
4954  {
4955  	return data;
4956  }
4957  EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
4958  
4959  struct clk_hw *of_clk_hw_simple_get(struct of_phandle_args *clkspec, void *data)
4960  {
4961  	return data;
4962  }
4963  EXPORT_SYMBOL_GPL(of_clk_hw_simple_get);
4964  
4965  struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
4966  {
4967  	struct clk_onecell_data *clk_data = data;
4968  	unsigned int idx = clkspec->args[0];
4969  
4970  	if (idx >= clk_data->clk_num) {
4971  		pr_err("%s: invalid clock index %u\n", __func__, idx);
4972  		return ERR_PTR(-EINVAL);
4973  	}
4974  
4975  	return clk_data->clks[idx];
4976  }
4977  EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
4978  
4979  struct clk_hw *
4980  of_clk_hw_onecell_get(struct of_phandle_args *clkspec, void *data)
4981  {
4982  	struct clk_hw_onecell_data *hw_data = data;
4983  	unsigned int idx = clkspec->args[0];
4984  
4985  	if (idx >= hw_data->num) {
4986  		pr_err("%s: invalid index %u\n", __func__, idx);
4987  		return ERR_PTR(-EINVAL);
4988  	}
4989  
4990  	return hw_data->hws[idx];
4991  }
4992  EXPORT_SYMBOL_GPL(of_clk_hw_onecell_get);
4993  
4994  /**
4995   * of_clk_add_provider() - Register a clock provider for a node
4996   * @np: Device node pointer associated with clock provider
4997   * @clk_src_get: callback for decoding clock
4998   * @data: context pointer for @clk_src_get callback.
4999   *
5000   * This function is *deprecated*. Use of_clk_add_hw_provider() instead.
5001   */
5002  int of_clk_add_provider(struct device_node *np,
5003  			struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
5004  						   void *data),
5005  			void *data)
5006  {
5007  	struct of_clk_provider *cp;
5008  	int ret;
5009  
5010  	if (!np)
5011  		return 0;
5012  
5013  	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
5014  	if (!cp)
5015  		return -ENOMEM;
5016  
5017  	cp->node = of_node_get(np);
5018  	cp->data = data;
5019  	cp->get = clk_src_get;
5020  
5021  	mutex_lock(&of_clk_mutex);
5022  	list_add(&cp->link, &of_clk_providers);
5023  	mutex_unlock(&of_clk_mutex);
5024  	pr_debug("Added clock from %pOF\n", np);
5025  
5026  	clk_core_reparent_orphans();
5027  
5028  	ret = of_clk_set_defaults(np, true);
5029  	if (ret < 0)
5030  		of_clk_del_provider(np);
5031  
5032  	fwnode_dev_initialized(&np->fwnode, true);
5033  
5034  	return ret;
5035  }
5036  EXPORT_SYMBOL_GPL(of_clk_add_provider);
5037  
5038  /**
5039   * of_clk_add_hw_provider() - Register a clock provider for a node
5040   * @np: Device node pointer associated with clock provider
5041   * @get: callback for decoding clk_hw
5042   * @data: context pointer for @get callback.
5043   */
5044  int of_clk_add_hw_provider(struct device_node *np,
5045  			   struct clk_hw *(*get)(struct of_phandle_args *clkspec,
5046  						 void *data),
5047  			   void *data)
5048  {
5049  	struct of_clk_provider *cp;
5050  	int ret;
5051  
5052  	if (!np)
5053  		return 0;
5054  
5055  	cp = kzalloc(sizeof(*cp), GFP_KERNEL);
5056  	if (!cp)
5057  		return -ENOMEM;
5058  
5059  	cp->node = of_node_get(np);
5060  	cp->data = data;
5061  	cp->get_hw = get;
5062  
5063  	mutex_lock(&of_clk_mutex);
5064  	list_add(&cp->link, &of_clk_providers);
5065  	mutex_unlock(&of_clk_mutex);
5066  	pr_debug("Added clk_hw provider from %pOF\n", np);
5067  
5068  	clk_core_reparent_orphans();
5069  
5070  	ret = of_clk_set_defaults(np, true);
5071  	if (ret < 0)
5072  		of_clk_del_provider(np);
5073  
5074  	fwnode_dev_initialized(&np->fwnode, true);
5075  
5076  	return ret;
5077  }
5078  EXPORT_SYMBOL_GPL(of_clk_add_hw_provider);
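
/*
 * Example (illustrative sketch only): registering several clk_hws at once
 * with the onecell helper. FOO_NR_CLKS, FOO_CLK_UART and the foo_* hws are
 * hypothetical and would normally mirror the DT binding's clock indices.
 *
 *	struct clk_hw_onecell_data *hw_data;
 *
 *	hw_data = devm_kzalloc(dev, struct_size(hw_data, hws, FOO_NR_CLKS),
 *			       GFP_KERNEL);
 *	if (!hw_data)
 *		return -ENOMEM;
 *
 *	hw_data->num = FOO_NR_CLKS;
 *	hw_data->hws[FOO_CLK_UART] = &foo_uart.hw;
 *
 *	ret = of_clk_add_hw_provider(dev->of_node, of_clk_hw_onecell_get,
 *				     hw_data);
 */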
5079  
5080  static void devm_of_clk_release_provider(struct device *dev, void *res)
5081  {
5082  	of_clk_del_provider(*(struct device_node **)res);
5083  }
5084  
5085  /*
5086   * We allow a child device to use its parent device as the clock provider node
5087   * for cases like MFD sub-devices where the child device driver wants to use
5088   * devm_*() APIs but not list the device in DT as a sub-node.
5089   */
5090  static struct device_node *get_clk_provider_node(struct device *dev)
5091  {
5092  	struct device_node *np, *parent_np;
5093  
5094  	np = dev->of_node;
5095  	parent_np = dev->parent ? dev->parent->of_node : NULL;
5096  
5097  	if (!of_property_present(np, "#clock-cells"))
5098  		if (of_property_present(parent_np, "#clock-cells"))
5099  			np = parent_np;
5100  
5101  	return np;
5102  }
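
/*
 * Illustrative DT sketch (hypothetical nodes, not from this file): the MFD
 * parent carries #clock-cells, so a child clock driver instantiated as an
 * MFD sub-device, without a node of its own, is registered against the
 * parent node.
 *
 *	pmic@30 {
 *		compatible = "vendor,example-pmic";
 *		#clock-cells = <1>;
 *	};
 */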
5103  
5104  /**
5105   * devm_of_clk_add_hw_provider() - Managed clk provider node registration
5106   * @dev: Device acting as the clock provider (used for DT node and lifetime)
5107   * @get: callback for decoding clk_hw
5108   * @data: context pointer for @get callback
5109   *
5110   * Registers a clock provider for the given device's node. If the device has
5111   * no DT node, or if its node lacks clock provider information (#clock-cells),
5112   * then the parent device's node is checked for this information. If the parent
5113   * node has #clock-cells, it is used for the registration. The provider is
5114   * automatically released at device exit.
5115   *
5116   * Return: 0 on success or an errno on failure.
5117   */
5118  int devm_of_clk_add_hw_provider(struct device *dev,
5119  			struct clk_hw *(*get)(struct of_phandle_args *clkspec,
5120  					      void *data),
5121  			void *data)
5122  {
5123  	struct device_node **ptr, *np;
5124  	int ret;
5125  
5126  	ptr = devres_alloc(devm_of_clk_release_provider, sizeof(*ptr),
5127  			   GFP_KERNEL);
5128  	if (!ptr)
5129  		return -ENOMEM;
5130  
5131  	np = get_clk_provider_node(dev);
5132  	ret = of_clk_add_hw_provider(np, get, data);
5133  	if (!ret) {
5134  		*ptr = np;
5135  		devres_add(dev, ptr);
5136  	} else {
5137  		devres_free(ptr);
5138  	}
5139  
5140  	return ret;
5141  }
5142  EXPORT_SYMBOL_GPL(devm_of_clk_add_hw_provider);
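
/*
 * Illustrative sketch (hypothetical driver, not part of this file): a typical
 * probe() registers a clk_hw_onecell_data table through the devres variant so
 * the provider is removed automatically on unbind. FOO_NR_CLKS is assumed.
 *
 *	static int foo_clk_probe(struct platform_device *pdev)
 *	{
 *		struct clk_hw_onecell_data *hw_data;
 *
 *		hw_data = devm_kzalloc(&pdev->dev,
 *				       struct_size(hw_data, hws, FOO_NR_CLKS),
 *				       GFP_KERNEL);
 *		if (!hw_data)
 *			return -ENOMEM;
 *		hw_data->num = FOO_NR_CLKS;
 *		// ... fill hw_data->hws[] with registered clk_hw pointers ...
 *
 *		return devm_of_clk_add_hw_provider(&pdev->dev,
 *						   of_clk_hw_onecell_get,
 *						   hw_data);
 *	}
 */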
5143  
5144  /**
5145   * of_clk_del_provider() - Remove a previously registered clock provider
5146   * @np: Device node pointer associated with clock provider
5147   */
5148  void of_clk_del_provider(struct device_node *np)
5149  {
5150  	struct of_clk_provider *cp;
5151  
5152  	if (!np)
5153  		return;
5154  
5155  	mutex_lock(&of_clk_mutex);
5156  	list_for_each_entry(cp, &of_clk_providers, link) {
5157  		if (cp->node == np) {
5158  			list_del(&cp->link);
5159  			fwnode_dev_initialized(&np->fwnode, false);
5160  			of_node_put(cp->node);
5161  			kfree(cp);
5162  			break;
5163  		}
5164  	}
5165  	mutex_unlock(&of_clk_mutex);
5166  }
5167  EXPORT_SYMBOL_GPL(of_clk_del_provider);
5168  
5169  /**
5170   * of_parse_clkspec() - Parse a DT clock specifier for a given device node
5171   * @np: device node to parse clock specifier from
5172   * @index: index of phandle to parse clock out of. If index < 0, @name is used
5173   * @name: clock name to find and parse. If name is NULL, the index is used
5174   * @out_args: Result of parsing the clock specifier
5175   *
5176   * Parses a device node's "clocks" and "clock-names" properties to find the
5177   * phandle and cells for the index or name that is desired. The resulting clock
5178   * specifier is placed into @out_args, or an errno is returned when there's a
5179   * parsing error. The @index argument is ignored if @name is non-NULL.
5180   *
5181   * Example:
5182   *
5183   * phandle1: clock-controller@1 {
5184   *	#clock-cells = <2>;
5185   * }
5186   *
5187   * phandle2: clock-controller@2 {
5188   *	#clock-cells = <1>;
5189   * }
5190   *
5191   * clock-consumer@3 {
5192   *	clocks = <&phandle1 1 2 &phandle2 3>;
5193   *	clock-names = "name1", "name2";
5194   * }
5195   *
5196   * To get a device_node for the `clock-controller@2' node, you may call
5197   * this function in a few different ways:
5198   *
5199   *   of_parse_clkspec(clock-consumer@3, -1, "name2", &args);
5200   *   of_parse_clkspec(clock-consumer@3, 1, NULL, &args);
5201   *   of_parse_clkspec(clock-consumer@3, 1, "name2", &args);
5202   *
5203   * Return: 0 upon successfully parsing the clock specifier. Otherwise, -ENOENT
5204   * if @name is NULL or -EINVAL if @name is non-NULL and it can't be found in
5205   * the "clock-names" property of @np.
5206   */
5207  static int of_parse_clkspec(const struct device_node *np, int index,
5208  			    const char *name, struct of_phandle_args *out_args)
5209  {
5210  	int ret = -ENOENT;
5211  
5212  	/* Walk up the tree of devices looking for a clock property that matches */
5213  	while (np) {
5214  		/*
5215  		 * For named clocks, first look up the name in the
5216  		 * "clock-names" property.  If it cannot be found, then index
5217  		 * will be an error code and of_parse_phandle_with_args() will
5218  		 * return -EINVAL.
5219  		 */
5220  		if (name)
5221  			index = of_property_match_string(np, "clock-names", name);
5222  		ret = of_parse_phandle_with_args(np, "clocks", "#clock-cells",
5223  						 index, out_args);
5224  		if (!ret)
5225  			break;
5226  		if (name && index >= 0)
5227  			break;
5228  
5229  		/*
5230  		 * No matching clock found on this node.  If the parent node
5231  		 * has a "clock-ranges" property, then we can try one of its
5232  		 * clocks.
5233  		 */
5234  		np = np->parent;
5235  		if (np && !of_property_present(np, "clock-ranges"))
5236  			break;
5237  		index = 0;
5238  	}
5239  
5240  	return ret;
5241  }
5242  
5243  static struct clk_hw *
5244  __of_clk_get_hw_from_provider(struct of_clk_provider *provider,
5245  			      struct of_phandle_args *clkspec)
5246  {
5247  	struct clk *clk;
5248  
5249  	if (provider->get_hw)
5250  		return provider->get_hw(clkspec, provider->data);
5251  
5252  	clk = provider->get(clkspec, provider->data);
5253  	if (IS_ERR(clk))
5254  		return ERR_CAST(clk);
5255  	return __clk_get_hw(clk);
5256  }
5257  
5258  static struct clk_hw *
5259  of_clk_get_hw_from_clkspec(struct of_phandle_args *clkspec)
5260  {
5261  	struct of_clk_provider *provider;
5262  	struct clk_hw *hw = ERR_PTR(-EPROBE_DEFER);
5263  
5264  	if (!clkspec)
5265  		return ERR_PTR(-EINVAL);
5266  
5267  	mutex_lock(&of_clk_mutex);
5268  	list_for_each_entry(provider, &of_clk_providers, link) {
5269  		if (provider->node == clkspec->np) {
5270  			hw = __of_clk_get_hw_from_provider(provider, clkspec);
5271  			if (!IS_ERR(hw))
5272  				break;
5273  		}
5274  	}
5275  	mutex_unlock(&of_clk_mutex);
5276  
5277  	return hw;
5278  }
5279  
5280  /**
5281   * of_clk_get_from_provider() - Lookup a clock from a clock provider
5282   * @clkspec: pointer to a clock specifier data structure
5283   *
5284   * This function looks up a struct clk from the registered list of clock
5285   * providers. The input is a clock specifier data structure as returned
5286   * by of_parse_phandle_with_args().
5287   */
5288  struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
5289  {
5290  	struct clk_hw *hw = of_clk_get_hw_from_clkspec(clkspec);
5291  
5292  	return clk_hw_create_clk(NULL, hw, NULL, __func__);
5293  }
5294  EXPORT_SYMBOL_GPL(of_clk_get_from_provider);
5295  
5296  struct clk_hw *of_clk_get_hw(struct device_node *np, int index,
5297  			     const char *con_id)
5298  {
5299  	int ret;
5300  	struct clk_hw *hw;
5301  	struct of_phandle_args clkspec;
5302  
5303  	ret = of_parse_clkspec(np, index, con_id, &clkspec);
5304  	if (ret)
5305  		return ERR_PTR(ret);
5306  
5307  	hw = of_clk_get_hw_from_clkspec(&clkspec);
5308  	of_node_put(clkspec.np);
5309  
5310  	return hw;
5311  }
5312  
5313  static struct clk *__of_clk_get(struct device_node *np,
5314  				int index, const char *dev_id,
5315  				const char *con_id)
5316  {
5317  	struct clk_hw *hw = of_clk_get_hw(np, index, con_id);
5318  
5319  	return clk_hw_create_clk(NULL, hw, dev_id, con_id);
5320  }
5321  
5322  struct clk *of_clk_get(struct device_node *np, int index)
5323  {
5324  	return __of_clk_get(np, index, np->full_name, NULL);
5325  }
5326  EXPORT_SYMBOL(of_clk_get);
5327  
5328  /**
5329   * of_clk_get_by_name() - Parse and lookup a clock referenced by a device node
5330   * @np: pointer to clock consumer node
5331   * @name: name of consumer's clock input, or NULL for the first clock reference
5332   *
5333   * This function parses the clocks and clock-names properties,
5334   * and uses them to look up the struct clk from the registered list of clock
5335   * providers.
5336   */
5337  struct clk *of_clk_get_by_name(struct device_node *np, const char *name)
5338  {
5339  	if (!np)
5340  		return ERR_PTR(-ENOENT);
5341  
5342  	return __of_clk_get(np, 0, np->full_name, name);
5343  }
5344  EXPORT_SYMBOL(of_clk_get_by_name);
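
/*
 * Illustrative consumer sketch (hypothetical clock name, not part of this
 * file): looking up a clock by its "clock-names" entry, using it, and
 * releasing it again.
 *
 *	struct clk *clk;
 *	int ret;
 *
 *	clk = of_clk_get_by_name(np, "baud");
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 *	ret = clk_prepare_enable(clk);
 *	...
 *	clk_disable_unprepare(clk);
 *	clk_put(clk);
 */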
5345  
5346  /**
5347   * of_clk_get_parent_count() - Count the number of clocks a device node has
5348   * @np: device node to count
5349   *
5350   * Returns: The number of clocks that are possible parents of this node
5351   */
5352  unsigned int of_clk_get_parent_count(const struct device_node *np)
5353  {
5354  	int count;
5355  
5356  	count = of_count_phandle_with_args(np, "clocks", "#clock-cells");
5357  	if (count < 0)
5358  		return 0;
5359  
5360  	return count;
5361  }
5362  EXPORT_SYMBOL_GPL(of_clk_get_parent_count);
5363  
5364  const char *of_clk_get_parent_name(const struct device_node *np, int index)
5365  {
5366  	struct of_phandle_args clkspec;
5367  	const char *clk_name;
5368  	bool found = false;
5369  	u32 pv;
5370  	int rc;
5371  	int count;
5372  	struct clk *clk;
5373  
5374  	rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
5375  					&clkspec);
5376  	if (rc)
5377  		return NULL;
5378  
5379  	index = clkspec.args_count ? clkspec.args[0] : 0;
5380  	count = 0;
5381  
5382  	/* if there is an indices property, use it to translate the index
5383  	 * specified into an array offset for the clock-output-names property.
5384  	 */
5385  	of_property_for_each_u32(clkspec.np, "clock-indices", pv) {
5386  		if (index == pv) {
5387  			index = count;
5388  			found = true;
5389  			break;
5390  		}
5391  		count++;
5392  	}
5393  	/* We went off the end of 'clock-indices' without finding it */
5394  	if (of_property_present(clkspec.np, "clock-indices") && !found)
5395  		return NULL;
5396  
5397  	if (of_property_read_string_index(clkspec.np, "clock-output-names",
5398  					  index,
5399  					  &clk_name) < 0) {
5400  		/*
5401  		 * Best effort to get the name if the clock has been
5402  		 * registered with the framework. If the clock isn't
5403  		 * registered, we return the node name as the name of
5404  		 * the clock as long as #clock-cells = 0.
5405  		 */
5406  		clk = of_clk_get_from_provider(&clkspec);
5407  		if (IS_ERR(clk)) {
5408  			if (clkspec.args_count == 0)
5409  				clk_name = clkspec.np->name;
5410  			else
5411  				clk_name = NULL;
5412  		} else {
5413  			clk_name = __clk_get_name(clk);
5414  			clk_put(clk);
5415  		}
5416  	}
5417  
5418  
5419  	of_node_put(clkspec.np);
5420  	return clk_name;
5421  }
5422  EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
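
/*
 * Illustrative DT sketch (hypothetical nodes, not from this file): with the
 * provider below, of_clk_get_parent_name(consumer, 0) maps clock cell 20
 * through "clock-indices" to array offset 1 and returns "bar".
 *
 *	clkc: clock-controller@0 {
 *		#clock-cells = <1>;
 *		clock-indices = <10>, <20>;
 *		clock-output-names = "foo", "bar";
 *	};
 *
 *	consumer {
 *		clocks = <&clkc 20>;
 *	};
 */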
5423  
5424  /**
5425   * of_clk_parent_fill() - Fill @parents with names of @np's parents and return
5426   * number of parents
5427   * @np: Device node pointer associated with clock provider
5428   * @parents: pointer to char array that hold the parents' names
5429   * @size: size of the @parents array
5430   *
5431   * Return: number of parents for the clock node.
5432   */
5433  int of_clk_parent_fill(struct device_node *np, const char **parents,
5434  		       unsigned int size)
5435  {
5436  	unsigned int i = 0;
5437  
5438  	while (i < size && (parents[i] = of_clk_get_parent_name(np, i)) != NULL)
5439  		i++;
5440  
5441  	return i;
5442  }
5443  EXPORT_SYMBOL_GPL(of_clk_parent_fill);
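
/*
 * Illustrative sketch (hypothetical array size, not part of this file):
 * collecting parent names for a mux from the provider node's "clocks"
 * property.
 *
 *	const char *parent_names[4];
 *	int num_parents;
 *
 *	num_parents = of_clk_parent_fill(np, parent_names,
 *					 ARRAY_SIZE(parent_names));
 */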
5444  
5445  struct clock_provider {
5446  	void (*clk_init_cb)(struct device_node *);
5447  	struct device_node *np;
5448  	struct list_head node;
5449  };
5450  
5451  /*
5452   * This function looks for a parent clock. If there is one, it checks
5453   * that the provider for this parent clock has been initialized, in
5454   * which case the parent clock will be ready.
5455   */
5456  static int parent_ready(struct device_node *np)
5457  {
5458  	int i = 0;
5459  
5460  	while (true) {
5461  		struct clk *clk = of_clk_get(np, i);
5462  
5463  		/* this parent is ready, we can check the next one */
5464  		if (!IS_ERR(clk)) {
5465  			clk_put(clk);
5466  			i++;
5467  			continue;
5468  		}
5469  
5470  		/* at least one parent is not ready, we exit now */
5471  		if (PTR_ERR(clk) == -EPROBE_DEFER)
5472  			return 0;
5473  
5474  		/*
5475  		 * Here we assume that the device tree is written
5476  		 * correctly, so an error means that there are no more
5477  		 * parents. As we didn't exit earlier, the previous
5478  		 * parents are ready. If there are no clock parents at
5479  		 * all, there is nothing to wait for, so we consider
5480  		 * their absence as being ready.
5481  		 */
5482  		return 1;
5483  	}
5484  }
5485  
5486  /**
5487   * of_clk_detect_critical() - set CLK_IS_CRITICAL flag from Device Tree
5488   * @np: Device node pointer associated with clock provider
5489   * @index: clock index
5490   * @flags: pointer to top-level framework flags
5491   *
5492   * Detects if the clock-critical property exists and, if so, sets the
5493   * corresponding CLK_IS_CRITICAL flag.
5494   *
5495   * Do not use this function. It exists only for legacy Device Tree
5496   * bindings, such as the outdated one-clock-per-node style.
5497   * Those bindings typically put all clock data into .dts and the Linux
5498   * driver has no clock data, thus making it impossible to set this flag
5499   * correctly from the driver. Only those drivers may call
5500   * of_clk_detect_critical() from their setup functions.
5501   *
5502   * Return: error code or zero on success
5503   */
5504  int of_clk_detect_critical(struct device_node *np, int index,
5505  			   unsigned long *flags)
5506  {
5507  	uint32_t idx;
5508  
5509  	if (!np || !flags)
5510  		return -EINVAL;
5511  
5512  	of_property_for_each_u32(np, "clock-critical", idx)
5513  		if (index == idx)
5514  			*flags |= CLK_IS_CRITICAL;
5515  
5516  	return 0;
5517  }
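
/*
 * Illustrative DT sketch (legacy binding, hypothetical node): marking clock
 * index 0 of a one-clock-per-node provider as critical, so the setup code can
 * OR CLK_IS_CRITICAL into its flags via of_clk_detect_critical().
 *
 *	osc: oscillator {
 *		compatible = "vendor,example-osc";
 *		#clock-cells = <0>;
 *		clock-critical = <0>;
 *	};
 */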
5518  
5519  /**
5520   * of_clk_init() - Scan and init clock providers from the DT
5521   * @matches: array of compatible values and init functions for providers.
5522   *
5523   * This function scans the device tree for matching clock providers
5524   * and calls their initialization functions, trying to follow the
5525   * dependencies between providers.
5526   */
5527  void __init of_clk_init(const struct of_device_id *matches)
5528  {
5529  	const struct of_device_id *match;
5530  	struct device_node *np;
5531  	struct clock_provider *clk_provider, *next;
5532  	bool is_init_done;
5533  	bool force = false;
5534  	LIST_HEAD(clk_provider_list);
5535  
5536  	if (!matches)
5537  		matches = &__clk_of_table;
5538  
5539  	/* First prepare the list of the clocks providers */
5540  	for_each_matching_node_and_match(np, matches, &match) {
5541  		struct clock_provider *parent;
5542  
5543  		if (!of_device_is_available(np))
5544  			continue;
5545  
5546  		parent = kzalloc(sizeof(*parent), GFP_KERNEL);
5547  		if (!parent) {
5548  			list_for_each_entry_safe(clk_provider, next,
5549  						 &clk_provider_list, node) {
5550  				list_del(&clk_provider->node);
5551  				of_node_put(clk_provider->np);
5552  				kfree(clk_provider);
5553  			}
5554  			of_node_put(np);
5555  			return;
5556  		}
5557  
5558  		parent->clk_init_cb = match->data;
5559  		parent->np = of_node_get(np);
5560  		list_add_tail(&parent->node, &clk_provider_list);
5561  	}
5562  
5563  	while (!list_empty(&clk_provider_list)) {
5564  		is_init_done = false;
5565  		list_for_each_entry_safe(clk_provider, next,
5566  					&clk_provider_list, node) {
5567  			if (force || parent_ready(clk_provider->np)) {
5568  
5569  				/* Don't populate platform devices */
5570  				of_node_set_flag(clk_provider->np,
5571  						 OF_POPULATED);
5572  
5573  				clk_provider->clk_init_cb(clk_provider->np);
5574  				of_clk_set_defaults(clk_provider->np, true);
5575  
5576  				list_del(&clk_provider->node);
5577  				of_node_put(clk_provider->np);
5578  				kfree(clk_provider);
5579  				is_init_done = true;
5580  			}
5581  		}
5582  
5583  		/*
5584  		 * If we didn't manage to initialize any of the
5585  		 * remaining providers during the last iteration, then
5586  		 * initialize all of the remaining ones unconditionally,
5587  		 * in case a clock parent was not mandatory.
5588  		 */
5589  		if (!is_init_done)
5590  			force = true;
5591  	}
5592  }
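
/*
 * Illustrative sketch (hypothetical driver, not part of this file): early
 * platform clock drivers register an init callback with CLK_OF_DECLARE() so
 * that of_clk_init(NULL) picks them up from __clk_of_table at boot.
 *
 *	static void __init foo_clk_init(struct device_node *np)
 *	{
 *		// register the clocks and call of_clk_add_hw_provider()
 *		// for @np
 *	}
 *	CLK_OF_DECLARE(foo_clk, "vendor,foo-clock-controller", foo_clk_init);
 */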
5593  #endif
5594