xref: /linux/drivers/clk/imx/clk-composite-8m.c (revision 2d945dde7fa3f17f46349360a9f97614de9f47da)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright 2018 NXP
4  */
5 
6 #include <linux/clk-provider.h>
7 #include <linux/errno.h>
8 #include <linux/export.h>
9 #include <linux/io.h>
10 #include <linux/slab.h>
11 
12 #include "clk.h"
13 
14 #define PCG_PREDIV_SHIFT	16
15 #define PCG_PREDIV_WIDTH	3
16 #define PCG_PREDIV_MAX		8
17 
18 #define PCG_DIV_SHIFT		0
19 #define PCG_CORE_DIV_WIDTH	3
20 #define PCG_DIV_WIDTH		6
21 #define PCG_DIV_MAX		64
22 
23 #define PCG_PCS_SHIFT		24
24 #define PCG_PCS_MASK		0x7
25 
26 #define PCG_CGC_SHIFT		28
27 
imx8m_clk_composite_divider_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)28 static unsigned long imx8m_clk_composite_divider_recalc_rate(struct clk_hw *hw,
29 						unsigned long parent_rate)
30 {
31 	struct clk_divider *divider = to_clk_divider(hw);
32 	unsigned long prediv_rate;
33 	unsigned int prediv_value;
34 	unsigned int div_value;
35 
36 	prediv_value = readl(divider->reg) >> divider->shift;
37 	prediv_value &= clk_div_mask(divider->width);
38 
39 	prediv_rate = divider_recalc_rate(hw, parent_rate, prediv_value,
40 						NULL, divider->flags,
41 						divider->width);
42 
43 	div_value = readl(divider->reg) >> PCG_DIV_SHIFT;
44 	div_value &= clk_div_mask(PCG_DIV_WIDTH);
45 
46 	return divider_recalc_rate(hw, prediv_rate, div_value, NULL,
47 				   divider->flags, PCG_DIV_WIDTH);
48 }
49 
imx8m_clk_composite_compute_dividers(unsigned long rate,unsigned long parent_rate,int * prediv,int * postdiv)50 static int imx8m_clk_composite_compute_dividers(unsigned long rate,
51 						unsigned long parent_rate,
52 						int *prediv, int *postdiv)
53 {
54 	int div1, div2;
55 	int error = INT_MAX;
56 	int ret = -EINVAL;
57 
58 	*prediv = 1;
59 	*postdiv = 1;
60 
61 	for (div1 = 1; div1 <= PCG_PREDIV_MAX; div1++) {
62 		for (div2 = 1; div2 <= PCG_DIV_MAX; div2++) {
63 			int new_error = ((parent_rate / div1) / div2) - rate;
64 
65 			if (abs(new_error) < abs(error)) {
66 				*prediv = div1;
67 				*postdiv = div2;
68 				error = new_error;
69 				ret = 0;
70 			}
71 		}
72 	}
73 	return ret;
74 }
75 
imx8m_clk_composite_divider_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)76 static int imx8m_clk_composite_divider_set_rate(struct clk_hw *hw,
77 					unsigned long rate,
78 					unsigned long parent_rate)
79 {
80 	struct clk_divider *divider = to_clk_divider(hw);
81 	unsigned long flags;
82 	int prediv_value;
83 	int div_value;
84 	int ret;
85 	u32 orig, val;
86 
87 	ret = imx8m_clk_composite_compute_dividers(rate, parent_rate,
88 						&prediv_value, &div_value);
89 	if (ret)
90 		return -EINVAL;
91 
92 	spin_lock_irqsave(divider->lock, flags);
93 
94 	orig = readl(divider->reg);
95 	val = orig & ~((clk_div_mask(divider->width) << divider->shift) |
96 		       (clk_div_mask(PCG_DIV_WIDTH) << PCG_DIV_SHIFT));
97 
98 	val |= (u32)(prediv_value  - 1) << divider->shift;
99 	val |= (u32)(div_value - 1) << PCG_DIV_SHIFT;
100 
101 	if (val != orig)
102 		writel(val, divider->reg);
103 
104 	spin_unlock_irqrestore(divider->lock, flags);
105 
106 	return ret;
107 }
108 
imx8m_divider_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)109 static int imx8m_divider_determine_rate(struct clk_hw *hw,
110 				      struct clk_rate_request *req)
111 {
112 	struct clk_divider *divider = to_clk_divider(hw);
113 	int prediv_value;
114 	int div_value;
115 
116 	/* if read only, just return current value */
117 	if (divider->flags & CLK_DIVIDER_READ_ONLY) {
118 		u32 val;
119 
120 		val = readl(divider->reg);
121 		prediv_value = val >> divider->shift;
122 		prediv_value &= clk_div_mask(divider->width);
123 		prediv_value++;
124 
125 		div_value = val >> PCG_DIV_SHIFT;
126 		div_value &= clk_div_mask(PCG_DIV_WIDTH);
127 		div_value++;
128 
129 		return divider_ro_determine_rate(hw, req, divider->table,
130 						 PCG_PREDIV_WIDTH + PCG_DIV_WIDTH,
131 						 divider->flags, prediv_value * div_value);
132 	}
133 
134 	return divider_determine_rate(hw, req, divider->table,
135 				      PCG_PREDIV_WIDTH + PCG_DIV_WIDTH,
136 				      divider->flags);
137 }
138 
/* clk_ops for the two-stage (prediv + postdiv) divider half of the composite. */
static const struct clk_ops imx8m_clk_composite_divider_ops = {
	.recalc_rate = imx8m_clk_composite_divider_recalc_rate,
	.set_rate = imx8m_clk_composite_divider_set_rate,
	.determine_rate = imx8m_divider_determine_rate,
};
144 
/*
 * Delegate parent lookup to the generic mux implementation; only
 * set_parent below needs i.MX8M-specific handling.
 */
static u8 imx8m_clk_composite_mux_get_parent(struct clk_hw *hw)
{
	return clk_mux_ops.get_parent(hw);
}
149 
/*
 * Select parent @index by updating the PCS field of the PCG register.
 * The register is written twice on purpose (see comment below).
 */
static int imx8m_clk_composite_mux_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_mux *mux = to_clk_mux(hw);
	u32 field = clk_mux_index_to_val(mux->table, mux->flags, index);
	unsigned long irqflags = 0;
	u32 val;

	if (mux->lock)
		spin_lock_irqsave(mux->lock, irqflags);

	/* Read-modify-write the parent-select field. */
	val = readl(mux->reg);
	val &= ~(mux->mask << mux->shift);
	val |= field << mux->shift;

	/*
	 * write twice to make sure non-target interface
	 * SEL_A/B point the same clk input.
	 */
	writel(val, mux->reg);
	writel(val, mux->reg);

	if (mux->lock)
		spin_unlock_irqrestore(mux->lock, irqflags);

	return 0;
}
176 
/*
 * Delegate rate determination to the generic mux implementation;
 * only set_parent needs i.MX8M-specific handling.
 */
static int
imx8m_clk_composite_mux_determine_rate(struct clk_hw *hw,
				       struct clk_rate_request *req)
{
	return clk_mux_ops.determine_rate(hw, req);
}
183 
184 
/* clk_ops for the mux half of the composite (double-write set_parent). */
static const struct clk_ops imx8m_clk_composite_mux_ops = {
	.get_parent = imx8m_clk_composite_mux_get_parent,
	.set_parent = imx8m_clk_composite_mux_set_parent,
	.determine_rate = imx8m_clk_composite_mux_determine_rate,
};
190 
imx8m_clk_composite_gate_enable(struct clk_hw * hw)191 static int imx8m_clk_composite_gate_enable(struct clk_hw *hw)
192 {
193 	struct clk_gate *gate = to_clk_gate(hw);
194 	unsigned long flags;
195 	u32 val;
196 
197 	spin_lock_irqsave(gate->lock, flags);
198 
199 	val = readl(gate->reg);
200 	val |= BIT(gate->bit_idx);
201 	writel(val, gate->reg);
202 
203 	spin_unlock_irqrestore(gate->lock, flags);
204 
205 	return 0;
206 }
207 
/*
 * Intentional no-op: the composite framework requires a disable hook,
 * but these ops are used when the gate must never actually be closed
 * (see the mcore_booted selection in __imx8m_clk_hw_composite()).
 */
static void imx8m_clk_composite_gate_disable(struct clk_hw *hw)
{
	/* composite clk requires the disable hook */
}
212 
/* Gate ops that can enable but never disable; is_enabled still reports truth. */
static const struct clk_ops imx8m_clk_composite_gate_ops = {
	.enable = imx8m_clk_composite_gate_enable,
	.disable = imx8m_clk_composite_gate_disable,
	.is_enabled = clk_gate_is_enabled,
};
218 
/*
 * __imx8m_clk_hw_composite() - register an i.MX8M mux+divider+gate composite
 * @name:            clock name
 * @parent_names:    array of selectable parent clock names
 * @num_parents:     number of entries in @parent_names
 * @reg:             base of the clock's PCG register (all three components
 *                   share this single register)
 * @composite_flags: IMX_COMPOSITE_* selector for core/bus/peripheral layout
 * @flags:           common clk framework flags
 *
 * Allocates the mux, divider and gate sub-components, picks the ops
 * matching the composite type, and registers them as one clk_hw.
 *
 * Return: the registered clk_hw on success, ERR_PTR(-ENOMEM) on any
 * failure (hw is pre-seeded with that error and ERR_CAST through the
 * goto ladder frees whatever was already allocated).
 */
struct clk_hw *__imx8m_clk_hw_composite(const char *name,
					const char * const *parent_names,
					int num_parents, void __iomem *reg,
					u32 composite_flags,
					unsigned long flags)
{
	struct clk_hw *hw = ERR_PTR(-ENOMEM), *mux_hw;
	struct clk_hw *div_hw, *gate_hw = NULL;
	struct clk_divider *div;
	struct clk_gate *gate = NULL;
	struct clk_mux *mux;
	const struct clk_ops *divider_ops;
	const struct clk_ops *mux_ops;
	const struct clk_ops *gate_ops;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_CAST(hw);

	/* Parent select lives in the PCS field of the shared register. */
	mux_hw = &mux->hw;
	mux->reg = reg;
	mux->shift = PCG_PCS_SHIFT;
	mux->mask = PCG_PCS_MASK;
	mux->lock = &imx_ccm_lock;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		goto free_mux;

	div_hw = &div->hw;
	div->reg = reg;
	if (composite_flags & IMX_COMPOSITE_CORE) {
		/* Core clocks: single 3-bit divider, generic divider ops. */
		div->shift = PCG_DIV_SHIFT;
		div->width = PCG_CORE_DIV_WIDTH;
		divider_ops = &clk_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else if (composite_flags & IMX_COMPOSITE_BUS) {
		/* Bus clocks: two-stage divider, double-write mux. */
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &imx8m_clk_composite_mux_ops;
	} else {
		/* Peripheral clocks: two-stage divider, generic mux. */
		div->shift = PCG_PREDIV_SHIFT;
		div->width = PCG_PREDIV_WIDTH;
		divider_ops = &imx8m_clk_composite_divider_ops;
		mux_ops = &clk_mux_ops;
		if (!(composite_flags & IMX_COMPOSITE_FW_MANAGED))
			flags |= CLK_SET_PARENT_GATE;
	}

	div->lock = &imx_ccm_lock;
	div->flags = CLK_DIVIDER_ROUND_CLOSEST;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		goto free_div;

	gate_hw = &gate->hw;
	gate->reg = reg;
	gate->bit_idx = PCG_CGC_SHIFT;
	gate->lock = &imx_ccm_lock;
	/*
	 * When the Cortex-M core has booted it may depend on these clocks,
	 * so use the ops whose disable hook is a no-op; otherwise use the
	 * normal gate ops.
	 */
	if (!mcore_booted)
		gate_ops = &clk_gate_ops;
	else
		gate_ops = &imx8m_clk_composite_gate_ops;

	hw = clk_hw_register_composite(NULL, name, parent_names, num_parents,
			mux_hw, mux_ops, div_hw,
			divider_ops, gate_hw, gate_ops, flags);
	if (IS_ERR(hw))
		goto free_gate;

	return hw;

free_gate:
	kfree(gate);
free_div:
	kfree(div);
free_mux:
	kfree(mux);
	return ERR_CAST(hw);
}
EXPORT_SYMBOL_GPL(__imx8m_clk_hw_composite);
303