// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018, The Linux Foundation. All rights reserved.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>

#include "clk-krait.h"

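/* Clock indices handed out by the OF provider: one mux per CPU plus the L2. */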
enum {
	cpu0_mux = 0,
	cpu1_mux,
	cpu2_mux,
	cpu3_mux,
	l2_mux,

	clks_max,
};

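/*
 * Hardware select values for the secondary and primary muxes, indexed by the
 * position of the corresponding parent in the clk framework parent list
 * (see krait_add_sec_mux() and krait_add_pri_mux()).
 */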
static unsigned int sec_mux_map[] = {
	2,
	0,
};

static unsigned int pri_mux_map[] = {
	1,
	2,
	0,
};

/*
 * Notifier callback that switches the muxes to their safe parent while the
 * HFPLL is being reprogrammed.
 */
static int krait_notifier_cb(struct notifier_block *nb,
			     unsigned long event,
			     void *data)
{
	int ret = 0;
	struct krait_mux_clk *mux = container_of(nb, struct krait_mux_clk,
						 clk_nb);
	/* Switch to safe parent */
	if (event == PRE_RATE_CHANGE) {
		mux->old_index = krait_mux_clk_ops.get_parent(&mux->hw);
		ret = krait_mux_clk_ops.set_parent(&mux->hw, mux->safe_sel);
		mux->reparent = false;
	/*
	 * By the time the POST_RATE_CHANGE notifier is called, the clk
	 * framework itself will already have switched the parent for the new
	 * rate. Only if it has not (no reparent happened) do we put back the
	 * old parent.
	 */
	} else if (event == POST_RATE_CHANGE) {
		if (!mux->reparent)
			ret = krait_mux_clk_ops.set_parent(&mux->hw,
							   mux->old_index);
	}

	return notifier_from_errno(ret);
}

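/* Hook the safe-parent notifier up to the mux's own clock. */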
static int krait_notifier_register(struct device *dev, struct clk *clk,
				   struct krait_mux_clk *mux)
{
	int ret = 0;

	mux->clk_nb.notifier_call = krait_notifier_cb;
	ret = devm_clk_notifier_register(dev, clk, &mux->clk_nb);
	if (ret)
		dev_err(dev, "failed to register clock notifier: %d\n", ret);

	return ret;
}

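/*
 * Register the divide-by-2 clock between the HFPLL and the primary mux;
 * its control field sits at bits [7:6] of the same register as the muxes.
 */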
static struct clk_hw *
krait_add_div(struct device *dev, int id, const char *s, unsigned int offset)
{
	struct krait_div2_clk *div;
	static struct clk_parent_data p_data[1];
	struct clk_init_data init = {
		.num_parents = ARRAY_SIZE(p_data),
		.ops = &krait_div2_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk_hw *clk;
	char *parent_name;
	int cpu, ret;

	div = devm_kzalloc(dev, sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	div->width = 2;
	div->shift = 6;
	div->lpl = id >= 0;
	div->offset = offset;
	div->hw.init = &init;

	init.name = kasprintf(GFP_KERNEL, "hfpll%s_div", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	init.parent_data = p_data;
	parent_name = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!parent_name) {
		clk = ERR_PTR(-ENOMEM);
		goto err_parent_name;
	}

	p_data[0].fw_name = parent_name;
	p_data[0].name = parent_name;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err_clk;
	}

	clk = &div->hw;

	/* clk-krait ignores rate changes unless the mux is flagged as enabled */
	if (id < 0)
		for_each_online_cpu(cpu)
			clk_prepare_enable(div->hw.clk);
	else
		clk_prepare_enable(div->hw.clk);

err_clk:
	kfree(parent_name);
err_parent_name:
	kfree(init.name);

	return clk;
}

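/*
 * Register the secondary mux, which selects between QSB and the ACPU aux
 * clock and serves as the safe parent of the primary mux while the HFPLL
 * is reprogrammed.
 */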
static struct clk_hw *
krait_add_sec_mux(struct device *dev, int id, const char *s,
		  unsigned int offset, bool unique_aux)
{
	int cpu, ret;
	struct krait_mux_clk *mux;
	static struct clk_parent_data sec_mux_list[2] = {
		{ .name = "qsb", .fw_name = "qsb" },
		{},
	};
	struct clk_init_data init = {
		.parent_data = sec_mux_list,
		.num_parents = ARRAY_SIZE(sec_mux_list),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk_hw *clk;
	char *parent_name;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->offset = offset;
	mux->lpl = id >= 0;
	mux->mask = 0x3;
	mux->shift = 2;
	mux->parent_map = sec_mux_map;
	mux->hw.init = &init;
	mux->safe_sel = 0;

	/*
	 * Checking for qcom,krait-cc-v1 or qcom,krait-cc-v2 is not enough to
	 * limit this to apq/ipq8064. Check the machine compatible directly to
	 * handle this erratum correctly.
	 */
	if (of_machine_is_compatible("qcom,ipq8064") ||
	    of_machine_is_compatible("qcom,apq8064"))
		mux->disable_sec_src_gating = true;

	init.name = kasprintf(GFP_KERNEL, "krait%s_sec_mux", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	if (unique_aux) {
		parent_name = kasprintf(GFP_KERNEL, "acpu%s_aux", s);
		if (!parent_name) {
			clk = ERR_PTR(-ENOMEM);
			goto err_aux;
		}
		sec_mux_list[1].fw_name = parent_name;
		sec_mux_list[1].name = parent_name;
	} else {
		/* "acpu_aux" fixed-factor clock registered in probe */
		sec_mux_list[1].name = "acpu_aux";
	}

	ret = devm_clk_hw_register(dev, &mux->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err_clk;
	}

	clk = &mux->hw;

	ret = krait_notifier_register(dev, mux->hw.clk, mux);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err_clk;
	}

	/* clk-krait ignores rate changes unless the mux is flagged as enabled */
	if (id < 0)
		for_each_online_cpu(cpu)
			clk_prepare_enable(mux->hw.clk);
	else
		clk_prepare_enable(mux->hw.clk);

err_clk:
	if (unique_aux)
		kfree(parent_name);
err_aux:
	kfree(init.name);
	return clk;
}

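/*
 * Register the primary mux for a CPU (or the L2). Its parents are the HFPLL,
 * the HFPLL/2 divider and the secondary mux; the secondary mux (index 2) is
 * the safe parent used while the HFPLL is reprogrammed.
 */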
static struct clk_hw *
krait_add_pri_mux(struct device *dev, struct clk_hw *hfpll_div, struct clk_hw *sec_mux,
		  int id, const char *s, unsigned int offset)
{
	int ret;
	struct krait_mux_clk *mux;
	static struct clk_parent_data p_data[3];
	struct clk_init_data init = {
		.parent_data = p_data,
		.num_parents = ARRAY_SIZE(p_data),
		.ops = &krait_mux_clk_ops,
		.flags = CLK_SET_RATE_PARENT,
	};
	struct clk_hw *clk;
	char *hfpll_name;

	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->mask = 0x3;
	mux->shift = 0;
	mux->offset = offset;
	mux->lpl = id >= 0;
	mux->parent_map = pri_mux_map;
	mux->hw.init = &init;
	mux->safe_sel = 2;

	init.name = kasprintf(GFP_KERNEL, "krait%s_pri_mux", s);
	if (!init.name)
		return ERR_PTR(-ENOMEM);

	hfpll_name = kasprintf(GFP_KERNEL, "hfpll%s", s);
	if (!hfpll_name) {
		clk = ERR_PTR(-ENOMEM);
		goto err_hfpll;
	}

	p_data[0].fw_name = hfpll_name;
	p_data[0].name = hfpll_name;

	p_data[1].hw = hfpll_div;
	p_data[2].hw = sec_mux;

	ret = devm_clk_hw_register(dev, &mux->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto err_clk;
	}

	clk = &mux->hw;

	ret = krait_notifier_register(dev, mux->hw.clk, mux);
	if (ret)
		clk = ERR_PTR(ret);

err_clk:
	kfree(hfpll_name);
err_hfpll:
	kfree(init.name);
	return clk;
}

/* id < 0 for L2, otherwise id == physical CPU number */
static struct clk_hw *krait_add_clks(struct device *dev, int id, bool unique_aux)
{
	struct clk_hw *hfpll_div, *sec_mux, *pri_mux;
	unsigned int offset;
	void *p = NULL;
	const char *s;

	if (id >= 0) {
		offset = 0x4501 + (0x1000 * id);
		s = p = kasprintf(GFP_KERNEL, "%d", id);
		if (!s)
			return ERR_PTR(-ENOMEM);
	} else {
		offset = 0x500;
		s = "_l2";
	}

	hfpll_div = krait_add_div(dev, id, s, offset);
	if (IS_ERR(hfpll_div)) {
		pri_mux = hfpll_div;
		goto err;
	}

	sec_mux = krait_add_sec_mux(dev, id, s, offset, unique_aux);
	if (IS_ERR(sec_mux)) {
		pri_mux = sec_mux;
		goto err;
	}

	pri_mux = krait_add_pri_mux(dev, hfpll_div, sec_mux, id, s, offset);

err:
	kfree(p);
	return pri_mux;
}

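/* OF provider callback: map a clock specifier index to one of the muxes. */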
static struct clk *krait_of_get(struct of_phandle_args *clkspec, void *data)
{
	unsigned int idx = clkspec->args[0];
	struct clk **clks = data;

	if (idx >= clks_max) {
		pr_err("%s: invalid clock index %d\n", __func__, idx);
		return ERR_PTR(-EINVAL);
	}

	return clks[idx] ? : ERR_PTR(-ENODEV);
}

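/* The match data flags v1 as having a unique aux clock per CPU. */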
static const struct of_device_id krait_cc_match_table[] = {
	{ .compatible = "qcom,krait-cc-v1", (void *)1UL },
	{ .compatible = "qcom,krait-cc-v2" },
	{}
};
MODULE_DEVICE_TABLE(of, krait_cc_match_table);

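/*
 * Register the fixed "qsb" reference (and "acpu_aux" when the aux clock is
 * shared), build the divider/mux tree for each CPU and for the L2, keep the
 * muxes enabled, force the HFPLLs to reinitialize at known rates and expose
 * the muxes through an OF clock provider.
 */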
static int krait_cc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	unsigned long cur_rate, aux_rate;
	int cpu;
	struct clk_hw *mux, *l2_pri_mux;
	struct clk *clk, **clks;
	bool unique_aux = !!device_get_match_data(dev);

	/* Rate is 1 because 0 causes problems for __clk_mux_determine_rate */
	clk = clk_register_fixed_rate(dev, "qsb", NULL, 0, 1);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	if (!unique_aux) {
		clk = clk_register_fixed_factor(dev, "acpu_aux",
						"gpll0_vote", 0, 1, 2);
		if (IS_ERR(clk))
			return PTR_ERR(clk);
	}

	/* Krait configurations have at most 4 CPUs and one L2 */
	clks = devm_kcalloc(dev, clks_max, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		mux = krait_add_clks(dev, cpu, unique_aux);
		if (IS_ERR(mux))
			return PTR_ERR(mux);
		clks[cpu] = mux->clk;
	}

	l2_pri_mux = krait_add_clks(dev, -1, unique_aux);
	if (IS_ERR(l2_pri_mux))
		return PTR_ERR(l2_pri_mux);
	clks[l2_mux] = l2_pri_mux->clk;

	/*
	 * We don't want the CPU or L2 clocks to be turned off at late init
	 * if CPUFREQ or HOTPLUG configs are disabled. So, bump up the
	 * refcount of these clocks. Any cpufreq/hotplug manager can assume
	 * that the clocks have already been prepared and enabled by the time
	 * they take over.
	 */
	for_each_online_cpu(cpu) {
		clk_prepare_enable(clks[l2_mux]);
		WARN(clk_prepare_enable(clks[cpu]),
		     "Unable to turn on CPU%d clock", cpu);
	}

	/*
	 * Force reinit of HFPLLs and muxes to overwrite any potential
	 * incorrect configuration of HFPLLs and muxes by the bootloader.
	 * While at it, also make sure the cores are running at known rates
	 * and print the current rate.
	 *
	 * The clocks are set to aux clock rate first to make sure the
	 * secondary mux is not sourcing off of QSB. The rate is then set to
	 * two different rates to force a HFPLL reinit under all
	 * circumstances.
	 */
	cur_rate = clk_get_rate(clks[l2_mux]);
	aux_rate = 384000000;
	if (cur_rate < aux_rate) {
		pr_info("L2 @ Undefined rate. Forcing new rate.\n");
		cur_rate = aux_rate;
	}
	clk_set_rate(clks[l2_mux], aux_rate);
	clk_set_rate(clks[l2_mux], 2);
	clk_set_rate(clks[l2_mux], cur_rate);
	pr_info("L2 @ %lu KHz\n", clk_get_rate(clks[l2_mux]) / 1000);
	for_each_possible_cpu(cpu) {
		clk = clks[cpu];
		cur_rate = clk_get_rate(clk);
		if (cur_rate < aux_rate) {
			pr_info("CPU%d @ Undefined rate. Forcing new rate.\n", cpu);
			cur_rate = aux_rate;
		}

		clk_set_rate(clk, aux_rate);
		clk_set_rate(clk, 2);
		clk_set_rate(clk, cur_rate);
		pr_info("CPU%d @ %lu KHz\n", cpu, clk_get_rate(clk) / 1000);
	}

	of_clk_add_provider(dev->of_node, krait_of_get, clks);

	return 0;
}

static struct platform_driver krait_cc_driver = {
	.probe = krait_cc_probe,
	.driver = {
		.name = "krait-cc",
		.of_match_table = krait_cc_match_table,
	},
};
module_platform_driver(krait_cc_driver);

MODULE_DESCRIPTION("Krait CPU Clock Driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:krait-cc");