// SPDX-License-Identifier: GPL-2.0
/*
 * RISC-V MPXY Based Clock Driver
 *
 * Copyright (C) 2025 Ventana Micro Systems Ltd.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/riscv-rpmi-message.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/wordpart.h>

#define RPMI_CLK_DISCRETE_MAX_NUM_RATES		16
#define RPMI_CLK_NAME_LEN			16

#define to_rpmi_clk(clk)	container_of(clk, struct rpmi_clk, hw)

enum rpmi_clk_config {
	RPMI_CLK_DISABLE = 0,
	RPMI_CLK_ENABLE = 1,
	RPMI_CLK_CONFIG_MAX_IDX
};

#define RPMI_CLK_TYPE_MASK			GENMASK(1, 0)
enum rpmi_clk_type {
	RPMI_CLK_DISCRETE = 0,
	RPMI_CLK_LINEAR = 1,
	RPMI_CLK_TYPE_MAX_IDX
};

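/**
 * struct rpmi_clk_context - per-device state shared by all clocks
 * @dev: platform device backing this clock provider
 * @chan: mailbox channel used for RPMI clock service messages
 * @client: mailbox client registered for the above channel
 * @max_msg_data_size: maximum RPMI message data size reported by the mailbox
 */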
struct rpmi_clk_context {
	struct device *dev;
	struct mbox_chan *chan;
	struct mbox_client client;
	u32 max_msg_data_size;
};

/*
 * union rpmi_clk_rates holds the supported rates in the same format as
 * defined by the RPMI specification, so no conversion to or from another
 * representation (e.g., struct linear_range) is needed.
 */
union rpmi_clk_rates {
	u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
	struct {
		u64 min;
		u64 max;
		u64 step;
	} linear;
};

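/**
 * struct rpmi_clk - per-clock instance built from RPMI attributes
 * @context: shared RPMI clock context (mailbox channel, etc.)
 * @id: clock identifier used in RPMI clock service requests
 * @num_rates: number of supported rates reported by the platform
 * @transition_latency: rate transition latency reported by the platform
 * @type: rate format (discrete list or linear range)
 * @rates: supported rates in the format indicated by @type
 * @name: clock name reported by the platform
 * @hw: common clock framework handle for this clock
 */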
struct rpmi_clk {
	struct rpmi_clk_context *context;
	u32 id;
	u32 num_rates;
	u32 transition_latency;
	enum rpmi_clk_type type;
	union rpmi_clk_rates *rates;
	char name[RPMI_CLK_NAME_LEN];
	struct clk_hw hw;
};

struct rpmi_clk_rate_discrete {
	__le32 lo;
	__le32 hi;
};

struct rpmi_clk_rate_linear {
	__le32 min_lo;
	__le32 min_hi;
	__le32 max_lo;
	__le32 max_hi;
	__le32 step_lo;
	__le32 step_hi;
};

struct rpmi_get_num_clocks_rx {
	__le32 status;
	__le32 num_clocks;
};

struct rpmi_get_attrs_tx {
	__le32 clkid;
};

struct rpmi_get_attrs_rx {
	__le32 status;
	__le32 flags;
	__le32 num_rates;
	__le32 transition_latency;
	char name[RPMI_CLK_NAME_LEN];
};

struct rpmi_get_supp_rates_tx {
	__le32 clkid;
	__le32 clk_rate_idx;
};

struct rpmi_get_supp_rates_rx {
	__le32 status;
	__le32 flags;
	__le32 remaining;
	__le32 returned;
	__le32 rates[];
};

struct rpmi_get_rate_tx {
	__le32 clkid;
};

struct rpmi_get_rate_rx {
	__le32 status;
	__le32 lo;
	__le32 hi;
};

struct rpmi_set_rate_tx {
	__le32 clkid;
	__le32 flags;
	__le32 lo;
	__le32 hi;
};

struct rpmi_set_rate_rx {
	__le32 status;
};

struct rpmi_set_config_tx {
	__le32 clkid;
	__le32 config;
};

struct rpmi_set_config_rx {
	__le32 status;
};

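/* Combine the upper and lower 32-bit halves of an RPMI rate value into a u64. */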
static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
{
	return (((u64)(__hi) << 32) | (u32)(__lo));
}

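/*
 * Query the total number of clocks managed by the RPMI clock service
 * group. Returns 0 on any mailbox or service error so the caller can
 * treat it as "no clocks found".
 */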
static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
{
	struct rpmi_get_num_clocks_rx rx, *resp;
	struct rpmi_mbox_message msg;
	int ret;

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
					  NULL, 0, &rx, sizeof(rx));

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return 0;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp || resp->status)
		return 0;

	return le32_to_cpu(resp->num_clocks);
}

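/*
 * Fetch the attributes of a single clock (name, number of rates,
 * transition latency and rate format) and cache them in @rpmi_clk.
 */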
static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
{
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_mbox_message msg;
	struct rpmi_get_attrs_tx tx;
	struct rpmi_get_attrs_rx rx, *resp;
	u8 format;
	int ret;

	tx.clkid = cpu_to_le32(clkid);
	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
					  &tx, sizeof(tx), &rx, sizeof(rx));

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));

	rpmi_clk->id = clkid;
	rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
	rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
	strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);

	format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
	if (format >= RPMI_CLK_TYPE_MAX_IDX)
		return -EINVAL;

	rpmi_clk->type = format;

	return 0;
}

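/*
 * Read the supported rates of a clock. For the discrete format the list
 * may not fit in a single RPMI message, so the request is repeated with
 * an increasing rate index until the platform reports nothing remaining.
 * For the linear format a single reply carries the min/max/step triplet.
 */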
static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
{
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_clk_rate_discrete *rate_discrete;
	struct rpmi_clk_rate_linear *rate_linear;
	struct rpmi_get_supp_rates_tx tx;
	struct rpmi_get_supp_rates_rx *resp;
	struct rpmi_mbox_message msg;
	size_t clk_rate_idx;
	int ret, rateidx, j;

	tx.clkid = cpu_to_le32(clkid);
	tx.clk_rate_idx = 0;

	/*
	 * Allocate an rx buffer large enough to accommodate all the rates
	 * that can be sent in one RPMI message.
	 */
	struct rpmi_get_supp_rates_rx *rx __free(kfree) =
					kzalloc(context->max_msg_data_size, GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
					  &tx, sizeof(tx), rx, context->max_msg_data_size);

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));
	if (!le32_to_cpu(resp->returned))
		return -EINVAL;

	if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
		rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;

		for (rateidx = 0; rateidx < le32_to_cpu(resp->returned); rateidx++) {
			rpmi_clk->rates->discrete[rateidx] =
				rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
						 le32_to_cpu(rate_discrete[rateidx].lo));
		}

		/*
		 * Keep sending the request message until all
		 * the rates are received.
		 */
		clk_rate_idx = 0;
		while (le32_to_cpu(resp->remaining)) {
			clk_rate_idx += le32_to_cpu(resp->returned);
			tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);

			rpmi_mbox_init_send_with_response(&msg,
							  RPMI_CLK_SRV_GET_SUPPORTED_RATES,
							  &tx, sizeof(tx),
							  rx, context->max_msg_data_size);

			ret = rpmi_mbox_send_message(context->chan, &msg);
			if (ret)
				return ret;

			resp = rpmi_mbox_get_msg_response(&msg);
			if (!resp)
				return -EINVAL;
			if (resp->status)
				return rpmi_to_linux_error(le32_to_cpu(resp->status));
			if (!le32_to_cpu(resp->returned))
				return -EINVAL;

			for (j = 0; j < le32_to_cpu(resp->returned); j++) {
				if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned))
					break;
				rpmi_clk->rates->discrete[rateidx++] =
					rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
							 le32_to_cpu(rate_discrete[j].lo));
			}
		}
	} else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
		rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;

		rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
							       le32_to_cpu(rate_linear->min_lo));
		rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
							       le32_to_cpu(rate_linear->max_lo));
		rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
								le32_to_cpu(rate_linear->step_lo));
	}

	return 0;
}

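/* Read the current rate of the clock from the platform microcontroller. */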
static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_mbox_message msg;
	struct rpmi_get_rate_tx tx;
	struct rpmi_get_rate_rx rx, *resp;
	int ret;

	tx.clkid = cpu_to_le32(rpmi_clk->id);

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
					  &tx, sizeof(tx), &rx, sizeof(rx));

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));

	return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
}

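/*
 * For linear clocks, clamp the requested rate to [min, max] and round the
 * offset from min up to a whole number of steps. Discrete clocks pass the
 * request through unchanged.
 */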
static int rpmi_clk_determine_rate(struct clk_hw *hw,
				   struct clk_rate_request *req)
{
	struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
	u64 fmin, fmax, ftmp;

	/*
	 * Keep the requested rate if the clock format
	 * is of discrete type. Let the platform which
	 * is actually controlling the clock handle that.
	 */
	if (rpmi_clk->type == RPMI_CLK_DISCRETE)
		return 0;

	fmin = rpmi_clk->rates->linear.min;
	fmax = rpmi_clk->rates->linear.max;

	if (req->rate <= fmin) {
		req->rate = fmin;
		return 0;
	} else if (req->rate >= fmax) {
		req->rate = fmax;
		return 0;
	}

	ftmp = req->rate - fmin;
	ftmp += rpmi_clk->rates->linear.step - 1;
	do_div(ftmp, rpmi_clk->rates->linear.step);

	req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;

	return 0;
}

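/* Request a rate change from the platform via the RPMI SET_RATE service. */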
static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
			     unsigned long parent_rate)
{
	struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_mbox_message msg;
	struct rpmi_set_rate_tx tx;
	struct rpmi_set_rate_rx rx, *resp;
	int ret;

	tx.clkid = cpu_to_le32(rpmi_clk->id);
	tx.lo = cpu_to_le32(lower_32_bits(rate));
	tx.hi = cpu_to_le32(upper_32_bits(rate));

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
					  &tx, sizeof(tx), &rx, sizeof(rx));

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));

	return 0;
}

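/* Enable the clock through the RPMI SET_CONFIG service. */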
static int rpmi_clk_enable(struct clk_hw *hw)
{
	struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_mbox_message msg;
	struct rpmi_set_config_tx tx;
	struct rpmi_set_config_rx rx, *resp;
	int ret;

	tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
	tx.clkid = cpu_to_le32(rpmi_clk->id);

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
					  &tx, sizeof(tx), &rx, sizeof(rx));

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));

	return 0;
}

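/*
 * Disable the clock through the RPMI SET_CONFIG service. The unprepare
 * callback cannot return an error, so the response status is not checked.
 */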
static void rpmi_clk_disable(struct clk_hw *hw)
{
	struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_mbox_message msg;
	struct rpmi_set_config_tx tx;
	struct rpmi_set_config_rx rx;

	tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
	tx.clkid = cpu_to_le32(rpmi_clk->id);

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
					  &tx, sizeof(tx), &rx, sizeof(rx));

	rpmi_mbox_send_message(context->chan, &msg);
}

static const struct clk_ops rpmi_clk_ops = {
	.recalc_rate = rpmi_clk_recalc_rate,
	.determine_rate = rpmi_clk_determine_rate,
	.set_rate = rpmi_clk_set_rate,
	.prepare = rpmi_clk_enable,
	.unprepare = rpmi_clk_disable,
};

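/*
 * Discover one clock: query its attributes and supported rates, register
 * it with the common clock framework and constrain its rate range based
 * on the reported rates.
 */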
static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
{
	struct device *dev = context->dev;
	unsigned long min_rate, max_rate;
	union rpmi_clk_rates *rates;
	struct rpmi_clk *rpmi_clk;
	struct clk_init_data init = {};
	struct clk_hw *clk_hw;
	int ret;

	rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
	if (!rates)
		return ERR_PTR(-ENOMEM);

	rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
	if (!rpmi_clk)
		return ERR_PTR(-ENOMEM);

	rpmi_clk->context = context;
	rpmi_clk->rates = rates;

	ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
	if (ret)
		return dev_err_ptr_probe(dev, ret,
					 "Failed to get clk-%u attributes\n",
					 clkid);

	ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
	if (ret)
		return dev_err_ptr_probe(dev, ret,
					 "Get supported rates failed for clk-%u\n",
					 clkid);

	init.flags = CLK_GET_RATE_NOCACHE;
	init.num_parents = 0;
	init.ops = &rpmi_clk_ops;
	init.name = rpmi_clk->name;
	clk_hw = &rpmi_clk->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(dev, clk_hw);
	if (ret)
		return dev_err_ptr_probe(dev, ret,
					 "Unable to register clk-%u\n",
					 clkid);

	if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
		min_rate = rpmi_clk->rates->discrete[0];
		max_rate = rpmi_clk->rates->discrete[rpmi_clk->num_rates - 1];
	} else {
		min_rate = rpmi_clk->rates->linear.min;
		max_rate = rpmi_clk->rates->linear.max;
	}

	clk_hw_set_rate_range(clk_hw, min_rate, max_rate);

	return clk_hw;
}

static void rpmi_clk_mbox_chan_release(void *data)
{
	struct mbox_chan *chan = data;

	mbox_free_channel(chan);
}

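/*
 * Probe: request the MPXY mailbox channel, validate the RPMI spec version,
 * service group ID and service group version, then enumerate every clock
 * reported by the platform and register them as a clk_hw provider for
 * device tree consumers.
 */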
static int rpmi_clk_probe(struct platform_device *pdev)
{
	int ret;
	unsigned int num_clocks, i;
	struct clk_hw_onecell_data *clk_data;
	struct rpmi_clk_context *context;
	struct rpmi_mbox_message msg;
	struct clk_hw *hw_ptr;
	struct device *dev = &pdev->dev;

	context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;
	context->dev = dev;
	platform_set_drvdata(pdev, context);

	context->client.dev		= context->dev;
	context->client.rx_callback	= NULL;
	context->client.tx_block	= false;
	context->client.knows_txdone	= true;
	context->client.tx_tout		= 0;

	context->chan = mbox_request_channel(&context->client, 0);
	if (IS_ERR(context->chan))
		return PTR_ERR(context->chan);

	ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");

	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get spec version\n");
	if (msg.attr.value < RPMI_MKVER(1, 0)) {
		return dev_err_probe(dev, -EINVAL,
				     "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
				     RPMI_MKVER(1, 0), msg.attr.value);
	}

	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get service group ID\n");
	if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
		return dev_err_probe(dev, -EINVAL,
				     "service group match failed, expected 0x%x, found 0x%x\n",
				     RPMI_SRVGRP_CLOCK, msg.attr.value);
	}

	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get service group version\n");
	if (msg.attr.value < RPMI_MKVER(1, 0)) {
		return dev_err_probe(dev, -EINVAL,
				     "service group version failed, expected 0x%x, found 0x%x\n",
				     RPMI_MKVER(1, 0), msg.attr.value);
	}

	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get max message data size\n");

	context->max_msg_data_size = msg.attr.value;
	num_clocks = rpmi_clk_get_num_clocks(context);
	if (!num_clocks)
		return dev_err_probe(dev, -ENODEV, "No clocks found\n");

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
				GFP_KERNEL);
	if (!clk_data)
		return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
	clk_data->num = num_clocks;

	for (i = 0; i < clk_data->num; i++) {
		hw_ptr = rpmi_clk_enumerate(context, i);
		if (IS_ERR(hw_ptr)) {
			return dev_err_probe(dev, PTR_ERR(hw_ptr),
					     "Failed to register clk-%d\n", i);
		}
		clk_data->hws[i] = hw_ptr;
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");

	return 0;
}

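/*
 * Illustrative device tree usage (a minimal sketch; the node name, mailbox
 * phandle and cell values below are assumptions, not taken from the
 * dt-binding document):
 *
 *	clock-controller {
 *		compatible = "riscv,rpmi-clock";
 *		mboxes = <&mpxy_mbox 0x1000 0x0>;
 *		#clock-cells = <1>;
 *	};
 */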
static const struct of_device_id rpmi_clk_of_match[] = {
	{ .compatible = "riscv,rpmi-clock" },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);

static struct platform_driver rpmi_clk_driver = {
	.driver = {
		.name = "riscv-rpmi-clock",
		.of_match_table = rpmi_clk_of_match,
	},
	.probe = rpmi_clk_probe,
};
module_platform_driver(rpmi_clk_driver);

MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
MODULE_LICENSE("GPL");