1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2016 Maxime Ripard
4  * Maxime Ripard <maxime.ripard@free-electrons.com>
5  */
6 
7 #include <linux/clk-provider.h>
8 #include <linux/io.h>
9 
10 #include "ccu_gate.h"
11 #include "ccu_nkmp.h"
12 
/*
 * Working set for an NKMP-style PLL, whose output is
 * parent * N * K / (M * P).  For each factor, [min_, max_] is the
 * inclusive range to search and the bare member (n, k, m, p) receives
 * the value selected by ccu_nkmp_find_best().
 */
struct _ccu_nkmp {
	unsigned long	n, min_n, max_n;
	unsigned long	k, min_k, max_k;
	unsigned long	m, min_m, max_m;
	unsigned long	p, min_p, max_p;
};
19 
ccu_nkmp_calc_rate(unsigned long parent,unsigned long n,unsigned long k,unsigned long m,unsigned long p)20 static unsigned long ccu_nkmp_calc_rate(unsigned long parent,
21 					unsigned long n, unsigned long k,
22 					unsigned long m, unsigned long p)
23 {
24 	u64 rate = parent;
25 
26 	rate *= n * k;
27 	do_div(rate, m * p);
28 
29 	return rate;
30 }
31 
ccu_nkmp_find_best(unsigned long parent,unsigned long rate,struct _ccu_nkmp * nkmp)32 static unsigned long ccu_nkmp_find_best(unsigned long parent, unsigned long rate,
33 					struct _ccu_nkmp *nkmp)
34 {
35 	unsigned long best_rate = 0;
36 	unsigned long best_n = 0, best_k = 0, best_m = 0, best_p = 0;
37 	unsigned long _n, _k, _m, _p;
38 
39 	for (_k = nkmp->min_k; _k <= nkmp->max_k; _k++) {
40 		for (_n = nkmp->min_n; _n <= nkmp->max_n; _n++) {
41 			for (_m = nkmp->min_m; _m <= nkmp->max_m; _m++) {
42 				for (_p = nkmp->min_p; _p <= nkmp->max_p; _p <<= 1) {
43 					unsigned long tmp_rate;
44 
45 					tmp_rate = ccu_nkmp_calc_rate(parent,
46 								      _n, _k,
47 								      _m, _p);
48 
49 					if (tmp_rate > rate)
50 						continue;
51 
52 					if ((rate - tmp_rate) < (rate - best_rate)) {
53 						best_rate = tmp_rate;
54 						best_n = _n;
55 						best_k = _k;
56 						best_m = _m;
57 						best_p = _p;
58 					}
59 				}
60 			}
61 		}
62 	}
63 
64 	nkmp->n = best_n;
65 	nkmp->k = best_k;
66 	nkmp->m = best_m;
67 	nkmp->p = best_p;
68 
69 	return best_rate;
70 }
71 
ccu_nkmp_disable(struct clk_hw * hw)72 static void ccu_nkmp_disable(struct clk_hw *hw)
73 {
74 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
75 
76 	return ccu_gate_helper_disable(&nkmp->common, nkmp->enable);
77 }
78 
ccu_nkmp_enable(struct clk_hw * hw)79 static int ccu_nkmp_enable(struct clk_hw *hw)
80 {
81 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
82 
83 	return ccu_gate_helper_enable(&nkmp->common, nkmp->enable);
84 }
85 
ccu_nkmp_is_enabled(struct clk_hw * hw)86 static int ccu_nkmp_is_enabled(struct clk_hw *hw)
87 {
88 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
89 
90 	return ccu_gate_helper_is_enabled(&nkmp->common, nkmp->enable);
91 }
92 
ccu_nkmp_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)93 static unsigned long ccu_nkmp_recalc_rate(struct clk_hw *hw,
94 					unsigned long parent_rate)
95 {
96 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
97 	unsigned long n, m, k, p, rate;
98 	u32 reg;
99 
100 	reg = readl(nkmp->common.base + nkmp->common.reg);
101 
102 	n = reg >> nkmp->n.shift;
103 	n &= (1 << nkmp->n.width) - 1;
104 	n += nkmp->n.offset;
105 	if (!n)
106 		n++;
107 
108 	k = reg >> nkmp->k.shift;
109 	k &= (1 << nkmp->k.width) - 1;
110 	k += nkmp->k.offset;
111 	if (!k)
112 		k++;
113 
114 	m = reg >> nkmp->m.shift;
115 	m &= (1 << nkmp->m.width) - 1;
116 	m += nkmp->m.offset;
117 	if (!m)
118 		m++;
119 
120 	p = reg >> nkmp->p.shift;
121 	p &= (1 << nkmp->p.width) - 1;
122 
123 	rate = ccu_nkmp_calc_rate(parent_rate, n, k, m, 1 << p);
124 	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
125 		rate /= nkmp->fixed_post_div;
126 
127 	return rate;
128 }
129 
ccu_nkmp_round_rate(struct clk_hw * hw,unsigned long rate,unsigned long * parent_rate)130 static long ccu_nkmp_round_rate(struct clk_hw *hw, unsigned long rate,
131 			      unsigned long *parent_rate)
132 {
133 	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
134 	struct _ccu_nkmp _nkmp;
135 
136 	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
137 		rate *= nkmp->fixed_post_div;
138 
139 	if (nkmp->max_rate && rate > nkmp->max_rate) {
140 		rate = nkmp->max_rate;
141 		if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
142 			rate /= nkmp->fixed_post_div;
143 		return rate;
144 	}
145 
146 	_nkmp.min_n = nkmp->n.min ?: 1;
147 	_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
148 	_nkmp.min_k = nkmp->k.min ?: 1;
149 	_nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
150 	_nkmp.min_m = 1;
151 	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
152 	_nkmp.min_p = 1;
153 	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);
154 
155 	rate = ccu_nkmp_find_best(*parent_rate, rate, &_nkmp);
156 
157 	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
158 		rate = rate / nkmp->fixed_post_div;
159 
160 	return rate;
161 }
162 
/*
 * clk_ops set_rate: pick the best N/K/M/P factors for @rate and
 * program them into the clock register under the CCU lock, then wait
 * for the PLL to report lock.  Always returns 0.
 */
static int ccu_nkmp_set_rate(struct clk_hw *hw, unsigned long rate,
			   unsigned long parent_rate)
{
	struct ccu_nkmp *nkmp = hw_to_ccu_nkmp(hw);
	u32 n_mask = 0, k_mask = 0, m_mask = 0, p_mask = 0;
	struct _ccu_nkmp _nkmp;
	unsigned long flags;
	u32 reg;

	/* The factor search works in pre-postdiv units */
	if (nkmp->common.features & CCU_FEATURE_FIXED_POSTDIV)
		rate = rate * nkmp->fixed_post_div;

	/* Unset limits default to the full field range, minimum 1 */
	_nkmp.min_n = nkmp->n.min ?: 1;
	_nkmp.max_n = nkmp->n.max ?: 1 << nkmp->n.width;
	_nkmp.min_k = nkmp->k.min ?: 1;
	_nkmp.max_k = nkmp->k.max ?: 1 << nkmp->k.width;
	_nkmp.min_m = 1;
	_nkmp.max_m = nkmp->m.max ?: 1 << nkmp->m.width;
	_nkmp.min_p = 1;
	_nkmp.max_p = nkmp->p.max ?: 1 << ((1 << nkmp->p.width) - 1);

	/* Fills _nkmp.n/k/m/p with the chosen factors */
	ccu_nkmp_find_best(parent_rate, rate, &_nkmp);

	/*
	 * If width is 0, GENMASK() macro may not generate expected mask (0)
	 * as it falls under undefined behaviour by C standard due to shifts
	 * which are equal or greater than width of left operand. This can
	 * be easily avoided by explicitly checking if width is 0.
	 */
	if (nkmp->n.width)
		n_mask = GENMASK(nkmp->n.width + nkmp->n.shift - 1,
				 nkmp->n.shift);
	if (nkmp->k.width)
		k_mask = GENMASK(nkmp->k.width + nkmp->k.shift - 1,
				 nkmp->k.shift);
	if (nkmp->m.width)
		m_mask = GENMASK(nkmp->m.width + nkmp->m.shift - 1,
				 nkmp->m.shift);
	if (nkmp->p.width)
		p_mask = GENMASK(nkmp->p.width + nkmp->p.shift - 1,
				 nkmp->p.shift);

	/* Read-modify-write of the shared CCU register must be atomic */
	spin_lock_irqsave(nkmp->common.lock, flags);

	reg = readl(nkmp->common.base + nkmp->common.reg);
	reg &= ~(n_mask | k_mask | m_mask | p_mask);

	/* N/K/M are stored offset-adjusted; P is stored as its log2 */
	reg |= ((_nkmp.n - nkmp->n.offset) << nkmp->n.shift) & n_mask;
	reg |= ((_nkmp.k - nkmp->k.offset) << nkmp->k.shift) & k_mask;
	reg |= ((_nkmp.m - nkmp->m.offset) << nkmp->m.shift) & m_mask;
	reg |= (ilog2(_nkmp.p) << nkmp->p.shift) & p_mask;

	writel(reg, nkmp->common.base + nkmp->common.reg);

	spin_unlock_irqrestore(nkmp->common.lock, flags);

	ccu_helper_wait_for_lock(&nkmp->common, nkmp->lock);

	return 0;
}
223 
/*
 * clk_ops table for NKMP-style PLL clocks
 * (rate = parent * N * K / (M * P)), exported for the sunxi-ng CCU
 * drivers in the SUNXI_CCU namespace.
 */
const struct clk_ops ccu_nkmp_ops = {
	.disable	= ccu_nkmp_disable,
	.enable		= ccu_nkmp_enable,
	.is_enabled	= ccu_nkmp_is_enabled,

	.recalc_rate	= ccu_nkmp_recalc_rate,
	.round_rate	= ccu_nkmp_round_rate,
	.set_rate	= ccu_nkmp_set_rate,
};
EXPORT_SYMBOL_NS_GPL(ccu_nkmp_ops, SUNXI_CCU);
234