// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018, The Linux Foundation. All rights reserved.*/

#include <linux/err.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pm_domain.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
#include <soc/qcom/rpmh.h>
#include <dt-bindings/power/qcom-rpmpd.h>

#define domain_to_rpmhpd(domain) container_of(domain, struct rpmhpd, pd)

#define RPMH_ARC_MAX_LEVELS	16

/**
 * struct rpmhpd - top level RPMh power domain resource data structure
 * @dev:		rpmh power domain controller device
 * @pd:			generic_pm_domain corresponding to the power domain
 * @parent:		generic_pm_domain corresponding to the parent's power
 *			domain
 * @peer:		A peer power domain in case Active only Voting is
 *			supported
 * @active_only:	True if it represents an Active only peer
 * @corner:		current corner
 * @active_corner:	current active corner
 * @level:		An array of level (vlvl) to corner (hlvl) mappings
 *			derived from cmd-db
 * @level_count:	Number of levels supported by the power domain. max
 *			being 16 (0 - 15)
 * @enabled:		true if the power domain is enabled
 * @res_name:		Resource name used for cmd-db lookup
 * @addr:		Resource address as looked up using resource name from
 *			cmd-db
 */
struct rpmhpd {
	struct device	*dev;
	struct generic_pm_domain pd;
	struct generic_pm_domain *parent;
	struct rpmhpd	*peer;
	const bool	active_only;
	unsigned int	corner;
	unsigned int	active_corner;
	u32		level[RPMH_ARC_MAX_LEVELS];
	size_t		level_count;
	bool		enabled;
	const char	*res_name;
	u32		addr;
};

struct rpmhpd_desc {
	struct rpmhpd **rpmhpds;
	size_t num_pds;
};

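/*
 * rpmhpd_lock serializes corner aggregation across a power domain and its
 * peer, keeping the ACTIVE_ONLY/WAKE_ONLY/SLEEP votes sent to RPMh
 * consistent with the cached corner and active_corner values.
 */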
static DEFINE_MUTEX(rpmhpd_lock);

/* SDM845 RPMH powerdomains */

static struct rpmhpd sdm845_ebi = {
	.pd = { .name = "ebi", },
	.res_name = "ebi.lvl",
};

static struct rpmhpd sdm845_lmx = {
	.pd = { .name = "lmx", },
	.res_name = "lmx.lvl",
};

static struct rpmhpd sdm845_lcx = {
	.pd = { .name = "lcx", },
	.res_name = "lcx.lvl",
};

static struct rpmhpd sdm845_gfx = {
	.pd = { .name = "gfx", },
	.res_name = "gfx.lvl",
};

static struct rpmhpd sdm845_mss = {
	.pd = { .name = "mss", },
	.res_name = "mss.lvl",
};

static struct rpmhpd sdm845_mx_ao;
static struct rpmhpd sdm845_mx = {
	.pd = { .name = "mx", },
	.peer = &sdm845_mx_ao,
	.res_name = "mx.lvl",
};

static struct rpmhpd sdm845_mx_ao = {
	.pd = { .name = "mx_ao", },
	.active_only = true,
	.peer = &sdm845_mx,
	.res_name = "mx.lvl",
};

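/*
 * CX is modelled as a child (subdomain) of MX: the .parent links below make
 * genpd keep the MX domain powered whenever CX is in use.
 */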
static struct rpmhpd sdm845_cx_ao;
static struct rpmhpd sdm845_cx = {
	.pd = { .name = "cx", },
	.peer = &sdm845_cx_ao,
	.parent = &sdm845_mx.pd,
	.res_name = "cx.lvl",
};

static struct rpmhpd sdm845_cx_ao = {
	.pd = { .name = "cx_ao", },
	.active_only = true,
	.peer = &sdm845_cx,
	.parent = &sdm845_mx_ao.pd,
	.res_name = "cx.lvl",
};

static struct rpmhpd *sdm845_rpmhpds[] = {
	[SDM845_EBI] = &sdm845_ebi,
	[SDM845_MX] = &sdm845_mx,
	[SDM845_MX_AO] = &sdm845_mx_ao,
	[SDM845_CX] = &sdm845_cx,
	[SDM845_CX_AO] = &sdm845_cx_ao,
	[SDM845_LMX] = &sdm845_lmx,
	[SDM845_LCX] = &sdm845_lcx,
	[SDM845_GFX] = &sdm845_gfx,
	[SDM845_MSS] = &sdm845_mss,
};

static const struct rpmhpd_desc sdm845_desc = {
	.rpmhpds = sdm845_rpmhpds,
	.num_pds = ARRAY_SIZE(sdm845_rpmhpds),
};

/* SM8150 RPMH powerdomains */

static struct rpmhpd sm8150_mmcx_ao;
static struct rpmhpd sm8150_mmcx = {
	.pd = { .name = "mmcx", },
	.peer = &sm8150_mmcx_ao,
	.res_name = "mmcx.lvl",
};

static struct rpmhpd sm8150_mmcx_ao = {
	.pd = { .name = "mmcx_ao", },
	.active_only = true,
	.peer = &sm8150_mmcx,
	.res_name = "mmcx.lvl",
};

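/*
 * SM8150 shares most of its RPMh ARC resources with SDM845, so the SDM845
 * definitions above are reused directly in the table below.
 */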
static struct rpmhpd *sm8150_rpmhpds[] = {
	[SM8150_MSS] = &sdm845_mss,
	[SM8150_EBI] = &sdm845_ebi,
	[SM8150_LMX] = &sdm845_lmx,
	[SM8150_LCX] = &sdm845_lcx,
	[SM8150_GFX] = &sdm845_gfx,
	[SM8150_MX] = &sdm845_mx,
	[SM8150_MX_AO] = &sdm845_mx_ao,
	[SM8150_CX] = &sdm845_cx,
	[SM8150_CX_AO] = &sdm845_cx_ao,
	[SM8150_MMCX] = &sm8150_mmcx,
	[SM8150_MMCX_AO] = &sm8150_mmcx_ao,
};

static const struct rpmhpd_desc sm8150_desc = {
	.rpmhpds = sm8150_rpmhpds,
	.num_pds = ARRAY_SIZE(sm8150_rpmhpds),
};

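/* SM8250 RPMH powerdomains */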
static struct rpmhpd *sm8250_rpmhpds[] = {
	[SM8250_CX] = &sdm845_cx,
	[SM8250_CX_AO] = &sdm845_cx_ao,
	[SM8250_EBI] = &sdm845_ebi,
	[SM8250_GFX] = &sdm845_gfx,
	[SM8250_LCX] = &sdm845_lcx,
	[SM8250_LMX] = &sdm845_lmx,
	[SM8250_MMCX] = &sm8150_mmcx,
	[SM8250_MMCX_AO] = &sm8150_mmcx_ao,
	[SM8250_MX] = &sdm845_mx,
	[SM8250_MX_AO] = &sdm845_mx_ao,
};

static const struct rpmhpd_desc sm8250_desc = {
	.rpmhpds = sm8250_rpmhpds,
	.num_pds = ARRAY_SIZE(sm8250_rpmhpds),
};

/* SC7180 RPMH powerdomains */
static struct rpmhpd *sc7180_rpmhpds[] = {
	[SC7180_CX] = &sdm845_cx,
	[SC7180_CX_AO] = &sdm845_cx_ao,
	[SC7180_GFX] = &sdm845_gfx,
	[SC7180_MX] = &sdm845_mx,
	[SC7180_MX_AO] = &sdm845_mx_ao,
	[SC7180_LMX] = &sdm845_lmx,
	[SC7180_LCX] = &sdm845_lcx,
	[SC7180_MSS] = &sdm845_mss,
};

static const struct rpmhpd_desc sc7180_desc = {
	.rpmhpds = sc7180_rpmhpds,
	.num_pds = ARRAY_SIZE(sc7180_rpmhpds),
};

static const struct of_device_id rpmhpd_match_table[] = {
	{ .compatible = "qcom,sc7180-rpmhpd", .data = &sc7180_desc },
	{ .compatible = "qcom,sdm845-rpmhpd", .data = &sdm845_desc },
	{ .compatible = "qcom,sm8150-rpmhpd", .data = &sm8150_desc },
	{ .compatible = "qcom,sm8250-rpmhpd", .data = &sm8250_desc },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmhpd_match_table);

static int rpmhpd_send_corner(struct rpmhpd *pd, int state,
			      unsigned int corner, bool sync)
{
	struct tcs_cmd cmd = {
		.addr = pd->addr,
		.data = corner,
	};

	/*
	 * Wait for an ack only when we are increasing the
	 * perf state of the power domain
	 */
	if (sync)
		return rpmh_write(pd->dev, state, &cmd, 1);
	else
		return rpmh_write_async(pd->dev, state, &cmd, 1);
}

static void to_active_sleep(struct rpmhpd *pd, unsigned int corner,
			    unsigned int *active, unsigned int *sleep)
{
	*active = corner;

	if (pd->active_only)
		*sleep = 0;
	else
		*sleep = *active;
}

/*
 * This function is used to aggregate the votes across the active only
 * resources and their peers. The aggregated votes are sent to RPMh as
 * ACTIVE_ONLY votes (which take effect immediately), as WAKE_ONLY votes
 * (applied by RPMh on system wakeup) and as SLEEP votes (applied by RPMh
 * on system sleep).
 * We send ACTIVE_ONLY votes for resources without any peers. For others,
 * which have an active only peer, all 3 votes are sent.
 */
static int rpmhpd_aggregate_corner(struct rpmhpd *pd, unsigned int corner)
{
	int ret;
	struct rpmhpd *peer = pd->peer;
	unsigned int active_corner, sleep_corner;
	unsigned int this_active_corner = 0, this_sleep_corner = 0;
	unsigned int peer_active_corner = 0, peer_sleep_corner = 0;

	to_active_sleep(pd, corner, &this_active_corner, &this_sleep_corner);

	if (peer && peer->enabled)
		to_active_sleep(peer, peer->corner, &peer_active_corner,
				&peer_sleep_corner);

	active_corner = max(this_active_corner, peer_active_corner);

	ret = rpmhpd_send_corner(pd, RPMH_ACTIVE_ONLY_STATE, active_corner,
				 active_corner > pd->active_corner);
	if (ret)
		return ret;

	pd->active_corner = active_corner;

	if (peer) {
		peer->active_corner = active_corner;

		ret = rpmhpd_send_corner(pd, RPMH_WAKE_ONLY_STATE,
					 active_corner, false);
		if (ret)
			return ret;

		sleep_corner = max(this_sleep_corner, peer_sleep_corner);

		return rpmhpd_send_corner(pd, RPMH_SLEEP_STATE, sleep_corner,
					  false);
	}

	return ret;
}

static int rpmhpd_power_on(struct generic_pm_domain *domain)
{
	struct rpmhpd *pd = domain_to_rpmhpd(domain);
	int ret = 0;

	mutex_lock(&rpmhpd_lock);

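	/* Re-apply the most recently requested corner, if one has been set. */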
	if (pd->corner)
		ret = rpmhpd_aggregate_corner(pd, pd->corner);

	if (!ret)
		pd->enabled = true;

	mutex_unlock(&rpmhpd_lock);

	return ret;
}

static int rpmhpd_power_off(struct generic_pm_domain *domain)
{
	struct rpmhpd *pd = domain_to_rpmhpd(domain);
	int ret = 0;

	mutex_lock(&rpmhpd_lock);

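	/*
	 * Drop our vote to the lowest corner (level[0]); the resource's final
	 * state is still subject to RPMh's aggregation with other clients.
	 */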
	ret = rpmhpd_aggregate_corner(pd, pd->level[0]);

	if (!ret)
		pd->enabled = false;

	mutex_unlock(&rpmhpd_lock);

	return ret;
}

static int rpmhpd_set_performance_state(struct generic_pm_domain *domain,
					unsigned int level)
{
	struct rpmhpd *pd = domain_to_rpmhpd(domain);
	int ret = 0, i;

	mutex_lock(&rpmhpd_lock);

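	/* Find the lowest corner (hlvl) whose level (vlvl) satisfies the request. */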
	for (i = 0; i < pd->level_count; i++)
		if (level <= pd->level[i])
			break;

	/*
	 * If the level requested is more than that supported by the
	 * max corner, just set it to max anyway.
	 */
	if (i == pd->level_count)
		i--;

	if (pd->enabled) {
		ret = rpmhpd_aggregate_corner(pd, i);
		if (ret)
			goto out;
	}

	pd->corner = i;
out:
	mutex_unlock(&rpmhpd_lock);

	return ret;
}

static unsigned int rpmhpd_get_performance_state(struct generic_pm_domain *genpd,
						 struct dev_pm_opp *opp)
{
	return dev_pm_opp_get_level(opp);
}

static int rpmhpd_update_level_mapping(struct rpmhpd *rpmhpd)
{
	int i;
	const u16 *buf;

	buf = cmd_db_read_aux_data(rpmhpd->res_name, &rpmhpd->level_count);
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* 2 bytes used for each command DB aux data entry */
	rpmhpd->level_count >>= 1;

	if (rpmhpd->level_count > RPMH_ARC_MAX_LEVELS)
		return -EINVAL;

	for (i = 0; i < rpmhpd->level_count; i++) {
		rpmhpd->level[i] = buf[i];

		/*
		 * The AUX data may be zero padded.  These 0 valued entries at
		 * the end of the map must be ignored.
		 */
		if (i > 0 && rpmhpd->level[i] == 0) {
			rpmhpd->level_count = i;
			break;
		}
		pr_debug("%s: ARC hlvl=%2d --> vlvl=%4u\n", rpmhpd->res_name, i,
			 rpmhpd->level[i]);
	}

	return 0;
}

static int rpmhpd_probe(struct platform_device *pdev)
{
	int i, ret;
	size_t num_pds;
	struct device *dev = &pdev->dev;
	struct genpd_onecell_data *data;
	struct rpmhpd **rpmhpds;
	const struct rpmhpd_desc *desc;

	desc = of_device_get_match_data(dev);
	if (!desc)
		return -EINVAL;

	rpmhpds = desc->rpmhpds;
	num_pds = desc->num_pds;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num_pds, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	data->num_domains = num_pds;

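	/*
	 * Resolve each power domain's RPMh address and level map from cmd-db,
	 * then register it as a genpd.
	 */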
	for (i = 0; i < num_pds; i++) {
		if (!rpmhpds[i]) {
			dev_warn(dev, "rpmhpds[%d] is empty\n", i);
			continue;
		}

		rpmhpds[i]->dev = dev;
		rpmhpds[i]->addr = cmd_db_read_addr(rpmhpds[i]->res_name);
		if (!rpmhpds[i]->addr) {
			dev_err(dev, "Could not find RPMh address for resource %s\n",
				rpmhpds[i]->res_name);
			return -ENODEV;
		}

		ret = cmd_db_read_slave_id(rpmhpds[i]->res_name);
		if (ret != CMD_DB_HW_ARC) {
			dev_err(dev, "RPMh slave ID mismatch\n");
			return -EINVAL;
		}

		ret = rpmhpd_update_level_mapping(rpmhpds[i]);
		if (ret)
			return ret;

		rpmhpds[i]->pd.power_off = rpmhpd_power_off;
		rpmhpds[i]->pd.power_on = rpmhpd_power_on;
		rpmhpds[i]->pd.set_performance_state = rpmhpd_set_performance_state;
		rpmhpds[i]->pd.opp_to_performance_state = rpmhpd_get_performance_state;
		pm_genpd_init(&rpmhpds[i]->pd, NULL, true);

		data->domains[i] = &rpmhpds[i]->pd;
	}

	/* Add subdomains */
	for (i = 0; i < num_pds; i++) {
		if (!rpmhpds[i])
			continue;
		if (rpmhpds[i]->parent)
			pm_genpd_add_subdomain(rpmhpds[i]->parent,
					       &rpmhpds[i]->pd);
	}

	return of_genpd_add_provider_onecell(pdev->dev.of_node, data);
}

static struct platform_driver rpmhpd_driver = {
	.driver = {
		.name = "qcom-rpmhpd",
		.of_match_table = rpmhpd_match_table,
		.suppress_bind_attrs = true,
	},
	.probe = rpmhpd_probe,
};

static int __init rpmhpd_init(void)
{
	return platform_driver_register(&rpmhpd_driver);
}
core_initcall(rpmhpd_init);

MODULE_DESCRIPTION("Qualcomm Technologies, Inc. RPMh Power Domain Driver");
MODULE_LICENSE("GPL v2");