1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * RISC-V MPXY Based Clock Driver
4 *
5 * Copyright (C) 2025 Ventana Micro Systems Ltd.
6 */
7
8 #include <linux/clk-provider.h>
9 #include <linux/err.h>
10 #include <linux/mailbox_client.h>
11 #include <linux/mailbox/riscv-rpmi-message.h>
12 #include <linux/module.h>
13 #include <linux/platform_device.h>
14 #include <linux/types.h>
15 #include <linux/slab.h>
16 #include <linux/wordpart.h>
17
/* Maximum number of discrete rates the per-clock rate table can hold. */
#define RPMI_CLK_DISCRETE_MAX_NUM_RATES	16
/* Maximum clock name length, including the NUL terminator. */
#define RPMI_CLK_NAME_LEN		16

#define to_rpmi_clk(clk) container_of(clk, struct rpmi_clk, hw)

/* Config values accepted by the RPMI SET_CONFIG clock service. */
enum rpmi_clk_config {
	RPMI_CLK_DISABLE = 0,
	RPMI_CLK_ENABLE = 1,
	RPMI_CLK_CONFIG_MAX_IDX
};
28
/* Rate-format field within the GET_ATTRIBUTES flags word. */
#define RPMI_CLK_TYPE_MASK	GENMASK(1, 0)
enum rpmi_clk_type {
	RPMI_CLK_DISCRETE = 0,	/* table of individual rates */
	RPMI_CLK_LINEAR = 1,	/* min/max/step range */
	RPMI_CLK_TYPE_MAX_IDX
};
35
/*
 * Per-device state shared by all clocks exposed through one
 * RPMI clock service group instance.
 */
struct rpmi_clk_context {
	struct device *dev;		/* underlying platform device */
	struct mbox_chan *chan;		/* RPMI mailbox channel */
	struct mbox_client client;	/* mailbox client owning @chan */
	u32 max_msg_data_size;		/* max RPMI message payload, in bytes */
};
42
/*
 * rpmi_clk_rates holds the rates exactly in the format specified by
 * the RPMI specification, so no conversion to or from another
 * representation (e.g. struct linear_range) is needed.
 */
union rpmi_clk_rates {
	/* Individual supported rates for a discrete-format clock. */
	u64 discrete[RPMI_CLK_DISCRETE_MAX_NUM_RATES];
	/* Linear range: rates from min to max in increments of step. */
	struct {
		u64 min;
		u64 max;
		u64 step;
	} linear;
};

/* Driver state of a single RPMI-managed clock. */
struct rpmi_clk {
	struct rpmi_clk_context *context;	/* owning service group context */
	u32 id;					/* RPMI clock ID */
	u32 num_rates;				/* rate count reported by the platform */
	u32 transition_latency;			/* rate-switch latency from the platform */
	enum rpmi_clk_type type;		/* discrete or linear rate format */
	union rpmi_clk_rates *rates;		/* cached supported-rate data */
	char name[RPMI_CLK_NAME_LEN];		/* clock name from GET_ATTRIBUTES */
	struct clk_hw hw;			/* clock framework handle */
};
68
/* One 64-bit rate split into two little-endian words (discrete format). */
struct rpmi_clk_rate_discrete {
	__le32 lo;
	__le32 hi;
};

/* min/max/step triple, each 64-bit value split lo/hi (linear format). */
struct rpmi_clk_rate_linear {
	__le32 min_lo;
	__le32 min_hi;
	__le32 max_lo;
	__le32 max_hi;
	__le32 step_lo;
	__le32 step_hi;
};

/* GET_NUM_CLOCKS response. */
struct rpmi_get_num_clocks_rx {
	__le32 status;
	__le32 num_clocks;
};

/* GET_ATTRIBUTES request. */
struct rpmi_get_attrs_tx {
	__le32 clkid;
};

/* GET_ATTRIBUTES response. */
struct rpmi_get_attrs_rx {
	__le32 status;
	__le32 flags;			/* rate format in RPMI_CLK_TYPE_MASK bits */
	__le32 num_rates;
	__le32 transition_latency;
	char name[RPMI_CLK_NAME_LEN];
};

/* GET_SUPPORTED_RATES request. */
struct rpmi_get_supp_rates_tx {
	__le32 clkid;
	__le32 clk_rate_idx;		/* index of the first rate to return */
};

/* GET_SUPPORTED_RATES response header, followed by the rate data. */
struct rpmi_get_supp_rates_rx {
	__le32 status;
	__le32 flags;
	__le32 remaining;		/* rates not yet transferred */
	__le32 returned;		/* rates present in this message */
	__le32 rates[];
};

/* GET_RATE request. */
struct rpmi_get_rate_tx {
	__le32 clkid;
};

/* GET_RATE response. */
struct rpmi_get_rate_rx {
	__le32 status;
	__le32 lo;
	__le32 hi;
};

/* SET_RATE request. */
struct rpmi_set_rate_tx {
	__le32 clkid;
	__le32 flags;
	__le32 lo;
	__le32 hi;
};

/* SET_RATE response. */
struct rpmi_set_rate_rx {
	__le32 status;
};

/* SET_CONFIG request. */
struct rpmi_set_config_tx {
	__le32 clkid;
	__le32 config;			/* enum rpmi_clk_config value */
};

/* SET_CONFIG response. */
struct rpmi_set_config_rx {
	__le32 status;
};
142
rpmi_clkrate_u64(u32 __hi,u32 __lo)143 static inline u64 rpmi_clkrate_u64(u32 __hi, u32 __lo)
144 {
145 return (((u64)(__hi) << 32) | (u32)(__lo));
146 }
147
rpmi_clk_get_num_clocks(struct rpmi_clk_context * context)148 static u32 rpmi_clk_get_num_clocks(struct rpmi_clk_context *context)
149 {
150 struct rpmi_get_num_clocks_rx rx, *resp;
151 struct rpmi_mbox_message msg;
152 int ret;
153
154 rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_NUM_CLOCKS,
155 NULL, 0, &rx, sizeof(rx));
156
157 ret = rpmi_mbox_send_message(context->chan, &msg);
158 if (ret)
159 return 0;
160
161 resp = rpmi_mbox_get_msg_response(&msg);
162 if (!resp || resp->status)
163 return 0;
164
165 return le32_to_cpu(resp->num_clocks);
166 }
167
/*
 * Fetch and cache the static attributes (name, rate format, number of
 * rates, transition latency) of clock @clkid into @rpmi_clk.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int rpmi_clk_get_attrs(u32 clkid, struct rpmi_clk *rpmi_clk)
{
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_mbox_message msg;
	struct rpmi_get_attrs_tx tx;
	struct rpmi_get_attrs_rx rx, *resp;
	u8 format;
	int ret;

	tx.clkid = cpu_to_le32(clkid);
	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_ATTRIBUTES,
					  &tx, sizeof(tx), &rx, sizeof(rx));

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));

	rpmi_clk->id = clkid;
	rpmi_clk->num_rates = le32_to_cpu(resp->num_rates);
	rpmi_clk->transition_latency = le32_to_cpu(resp->transition_latency);
	strscpy(rpmi_clk->name, resp->name, RPMI_CLK_NAME_LEN);

	format = le32_to_cpu(resp->flags) & RPMI_CLK_TYPE_MASK;
	if (format >= RPMI_CLK_TYPE_MAX_IDX)
		return -EINVAL;

	rpmi_clk->type = format;

	/*
	 * A discrete clock must report at least one rate and no more than
	 * the rate table can hold; otherwise later indexing of
	 * rates->discrete[] (e.g. discrete[num_rates - 1]) would run out
	 * of bounds on data supplied by the platform.
	 */
	if (rpmi_clk->type == RPMI_CLK_DISCRETE &&
	    (!rpmi_clk->num_rates ||
	     rpmi_clk->num_rates > RPMI_CLK_DISCRETE_MAX_NUM_RATES))
		return -EINVAL;

	return 0;
}
204
/*
 * Retrieve the supported rates of clock @clkid into rpmi_clk->rates.
 *
 * Discrete rate tables may span multiple RPMI messages; the request is
 * repeated with an updated start index until all rates are received.
 * Linear (min/max/step) clocks always fit in a single response.
 *
 * All writes into the fixed-size discrete rate table are clamped to
 * RPMI_CLK_DISCRETE_MAX_NUM_RATES so a misbehaving platform cannot
 * overflow the buffer with bogus "returned"/"remaining" counts.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int rpmi_clk_get_supported_rates(u32 clkid, struct rpmi_clk *rpmi_clk)
{
	struct rpmi_clk_context *context = rpmi_clk->context;
	struct rpmi_clk_rate_discrete *rate_discrete;
	struct rpmi_clk_rate_linear *rate_linear;
	struct rpmi_get_supp_rates_tx tx;
	struct rpmi_get_supp_rates_rx *resp;
	struct rpmi_mbox_message msg;
	size_t clk_rate_idx;
	int ret, rateidx, j;

	tx.clkid = cpu_to_le32(clkid);
	tx.clk_rate_idx = 0;

	/*
	 * Make sure we allocate an rx buffer sufficient to accommodate all
	 * the rates sent in one RPMI message.
	 */
	struct rpmi_get_supp_rates_rx *rx __free(kfree) =
				kzalloc(context->max_msg_data_size, GFP_KERNEL);
	if (!rx)
		return -ENOMEM;

	rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_SUPPORTED_RATES,
					  &tx, sizeof(tx), rx, context->max_msg_data_size);

	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return ret;

	resp = rpmi_mbox_get_msg_response(&msg);
	if (!resp)
		return -EINVAL;
	if (resp->status)
		return rpmi_to_linux_error(le32_to_cpu(resp->status));
	if (!le32_to_cpu(resp->returned))
		return -EINVAL;

	if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
		rate_discrete = (struct rpmi_clk_rate_discrete *)resp->rates;

		/* Bounded by both the response count and the table size. */
		for (rateidx = 0;
		     rateidx < le32_to_cpu(resp->returned) &&
		     rateidx < RPMI_CLK_DISCRETE_MAX_NUM_RATES;
		     rateidx++) {
			rpmi_clk->rates->discrete[rateidx] =
				rpmi_clkrate_u64(le32_to_cpu(rate_discrete[rateidx].hi),
						 le32_to_cpu(rate_discrete[rateidx].lo));
		}

		/*
		 * Keep sending the request message until all the rates are
		 * received or the local rate table is full.
		 */
		clk_rate_idx = 0;
		while (le32_to_cpu(resp->remaining) &&
		       rateidx < RPMI_CLK_DISCRETE_MAX_NUM_RATES) {
			clk_rate_idx += le32_to_cpu(resp->returned);
			tx.clk_rate_idx = cpu_to_le32(clk_rate_idx);

			rpmi_mbox_init_send_with_response(&msg,
							  RPMI_CLK_SRV_GET_SUPPORTED_RATES,
							  &tx, sizeof(tx),
							  rx, context->max_msg_data_size);

			ret = rpmi_mbox_send_message(context->chan, &msg);
			if (ret)
				return ret;

			resp = rpmi_mbox_get_msg_response(&msg);
			if (!resp)
				return -EINVAL;
			if (resp->status)
				return rpmi_to_linux_error(le32_to_cpu(resp->status));
			if (!le32_to_cpu(resp->returned))
				return -EINVAL;

			for (j = 0; j < le32_to_cpu(resp->returned); j++) {
				if (rateidx >= clk_rate_idx + le32_to_cpu(resp->returned) ||
				    rateidx >= RPMI_CLK_DISCRETE_MAX_NUM_RATES)
					break;
				rpmi_clk->rates->discrete[rateidx++] =
					rpmi_clkrate_u64(le32_to_cpu(rate_discrete[j].hi),
							 le32_to_cpu(rate_discrete[j].lo));
			}
		}
	} else if (rpmi_clk->type == RPMI_CLK_LINEAR) {
		rate_linear = (struct rpmi_clk_rate_linear *)resp->rates;

		rpmi_clk->rates->linear.min = rpmi_clkrate_u64(le32_to_cpu(rate_linear->min_hi),
							       le32_to_cpu(rate_linear->min_lo));
		rpmi_clk->rates->linear.max = rpmi_clkrate_u64(le32_to_cpu(rate_linear->max_hi),
							       le32_to_cpu(rate_linear->max_lo));
		rpmi_clk->rates->linear.step = rpmi_clkrate_u64(le32_to_cpu(rate_linear->step_hi),
								le32_to_cpu(rate_linear->step_lo));
	}

	return 0;
}
299
rpmi_clk_recalc_rate(struct clk_hw * hw,unsigned long parent_rate)300 static unsigned long rpmi_clk_recalc_rate(struct clk_hw *hw,
301 unsigned long parent_rate)
302 {
303 struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
304 struct rpmi_clk_context *context = rpmi_clk->context;
305 struct rpmi_mbox_message msg;
306 struct rpmi_get_rate_tx tx;
307 struct rpmi_get_rate_rx rx, *resp;
308 int ret;
309
310 tx.clkid = cpu_to_le32(rpmi_clk->id);
311
312 rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_GET_RATE,
313 &tx, sizeof(tx), &rx, sizeof(rx));
314
315 ret = rpmi_mbox_send_message(context->chan, &msg);
316 if (ret)
317 return ret;
318
319 resp = rpmi_mbox_get_msg_response(&msg);
320 if (!resp)
321 return -EINVAL;
322 if (resp->status)
323 return rpmi_to_linux_error(le32_to_cpu(resp->status));
324
325 return rpmi_clkrate_u64(le32_to_cpu(resp->hi), le32_to_cpu(resp->lo));
326 }
327
rpmi_clk_determine_rate(struct clk_hw * hw,struct clk_rate_request * req)328 static int rpmi_clk_determine_rate(struct clk_hw *hw,
329 struct clk_rate_request *req)
330 {
331 struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
332 u64 fmin, fmax, ftmp;
333
334 /*
335 * Keep the requested rate if the clock format
336 * is of discrete type. Let the platform which
337 * is actually controlling the clock handle that.
338 */
339 if (rpmi_clk->type == RPMI_CLK_DISCRETE)
340 return 0;
341
342 fmin = rpmi_clk->rates->linear.min;
343 fmax = rpmi_clk->rates->linear.max;
344
345 if (req->rate <= fmin) {
346 req->rate = fmin;
347 return 0;
348 } else if (req->rate >= fmax) {
349 req->rate = fmax;
350 return 0;
351 }
352
353 ftmp = req->rate - fmin;
354 ftmp += rpmi_clk->rates->linear.step - 1;
355 do_div(ftmp, rpmi_clk->rates->linear.step);
356
357 req->rate = ftmp * rpmi_clk->rates->linear.step + fmin;
358
359 return 0;
360 }
361
rpmi_clk_set_rate(struct clk_hw * hw,unsigned long rate,unsigned long parent_rate)362 static int rpmi_clk_set_rate(struct clk_hw *hw, unsigned long rate,
363 unsigned long parent_rate)
364 {
365 struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
366 struct rpmi_clk_context *context = rpmi_clk->context;
367 struct rpmi_mbox_message msg;
368 struct rpmi_set_rate_tx tx;
369 struct rpmi_set_rate_rx rx, *resp;
370 int ret;
371
372 tx.clkid = cpu_to_le32(rpmi_clk->id);
373 tx.lo = cpu_to_le32(lower_32_bits(rate));
374 tx.hi = cpu_to_le32(upper_32_bits(rate));
375
376 rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_RATE,
377 &tx, sizeof(tx), &rx, sizeof(rx));
378
379 ret = rpmi_mbox_send_message(context->chan, &msg);
380 if (ret)
381 return ret;
382
383 resp = rpmi_mbox_get_msg_response(&msg);
384 if (!resp)
385 return -EINVAL;
386 if (resp->status)
387 return rpmi_to_linux_error(le32_to_cpu(resp->status));
388
389 return 0;
390 }
391
rpmi_clk_enable(struct clk_hw * hw)392 static int rpmi_clk_enable(struct clk_hw *hw)
393 {
394 struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
395 struct rpmi_clk_context *context = rpmi_clk->context;
396 struct rpmi_mbox_message msg;
397 struct rpmi_set_config_tx tx;
398 struct rpmi_set_config_rx rx, *resp;
399 int ret;
400
401 tx.config = cpu_to_le32(RPMI_CLK_ENABLE);
402 tx.clkid = cpu_to_le32(rpmi_clk->id);
403
404 rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
405 &tx, sizeof(tx), &rx, sizeof(rx));
406
407 ret = rpmi_mbox_send_message(context->chan, &msg);
408 if (ret)
409 return ret;
410
411 resp = rpmi_mbox_get_msg_response(&msg);
412 if (!resp)
413 return -EINVAL;
414 if (resp->status)
415 return rpmi_to_linux_error(le32_to_cpu(resp->status));
416
417 return 0;
418 }
419
rpmi_clk_disable(struct clk_hw * hw)420 static void rpmi_clk_disable(struct clk_hw *hw)
421 {
422 struct rpmi_clk *rpmi_clk = to_rpmi_clk(hw);
423 struct rpmi_clk_context *context = rpmi_clk->context;
424 struct rpmi_mbox_message msg;
425 struct rpmi_set_config_tx tx;
426 struct rpmi_set_config_rx rx;
427
428 tx.config = cpu_to_le32(RPMI_CLK_DISABLE);
429 tx.clkid = cpu_to_le32(rpmi_clk->id);
430
431 rpmi_mbox_init_send_with_response(&msg, RPMI_CLK_SRV_SET_CONFIG,
432 &tx, sizeof(tx), &rx, sizeof(rx));
433
434 rpmi_mbox_send_message(context->chan, &msg);
435 }
436
/*
 * Clock operations. Enable/disable are wired to prepare/unprepare
 * rather than enable/disable.
 * NOTE(review): presumably because RPMI mailbox transfers can sleep,
 * which is not allowed in the atomic enable/disable path — confirm.
 */
static const struct clk_ops rpmi_clk_ops = {
	.recalc_rate = rpmi_clk_recalc_rate,
	.determine_rate = rpmi_clk_determine_rate,
	.set_rate = rpmi_clk_set_rate,
	.prepare = rpmi_clk_enable,
	.unprepare = rpmi_clk_disable,
};
444
/*
 * Discover clock @clkid: fetch its attributes and supported rates,
 * register it with the clock framework, and constrain its rate range.
 *
 * All allocations are device-managed. Returns the registered clk_hw
 * or an ERR_PTR on failure.
 */
static struct clk_hw *rpmi_clk_enumerate(struct rpmi_clk_context *context, u32 clkid)
{
	struct device *dev = context->dev;
	unsigned long min_rate, max_rate;
	union rpmi_clk_rates *rates;
	struct rpmi_clk *rpmi_clk;
	struct clk_init_data init = {};
	struct clk_hw *clk_hw;
	u32 last_idx;
	int ret;

	rates = devm_kzalloc(dev, sizeof(*rates), GFP_KERNEL);
	if (!rates)
		return ERR_PTR(-ENOMEM);

	rpmi_clk = devm_kzalloc(dev, sizeof(*rpmi_clk), GFP_KERNEL);
	if (!rpmi_clk)
		return ERR_PTR(-ENOMEM);

	rpmi_clk->context = context;
	rpmi_clk->rates = rates;

	ret = rpmi_clk_get_attrs(clkid, rpmi_clk);
	if (ret)
		return dev_err_ptr_probe(dev, ret,
					 "Failed to get clk-%u attributes\n",
					 clkid);

	ret = rpmi_clk_get_supported_rates(clkid, rpmi_clk);
	if (ret)
		return dev_err_ptr_probe(dev, ret,
					 "Get supported rates failed for clk-%u\n",
					 clkid);

	/* init lives on the stack; the clk core only reads it during register. */
	init.flags = CLK_GET_RATE_NOCACHE;
	init.num_parents = 0;
	init.ops = &rpmi_clk_ops;
	init.name = rpmi_clk->name;
	clk_hw = &rpmi_clk->hw;
	clk_hw->init = &init;

	ret = devm_clk_hw_register(dev, clk_hw);
	if (ret)
		return dev_err_ptr_probe(dev, ret,
					 "Unable to register clk-%u\n",
					 clkid);

	if (rpmi_clk->type == RPMI_CLK_DISCRETE) {
		/*
		 * Clamp the last index so a bogus num_rates reported by
		 * the platform (0 or larger than the table) cannot read
		 * past the end of rates->discrete[].
		 */
		last_idx = rpmi_clk->num_rates;
		if (!last_idx || last_idx > RPMI_CLK_DISCRETE_MAX_NUM_RATES)
			last_idx = RPMI_CLK_DISCRETE_MAX_NUM_RATES;
		min_rate = rpmi_clk->rates->discrete[0];
		/* assumes rates sorted ascending — TODO confirm against RPMI spec */
		max_rate = rpmi_clk->rates->discrete[last_idx - 1];
	} else {
		min_rate = rpmi_clk->rates->linear.min;
		max_rate = rpmi_clk->rates->linear.max;
	}

	clk_hw_set_rate_range(clk_hw, min_rate, max_rate);

	return clk_hw;
}
503
/* devm action: release the RPMI mailbox channel on device teardown. */
static void rpmi_clk_mbox_chan_release(void *data)
{
	mbox_free_channel((struct mbox_chan *)data);
}
510
/*
 * Probe: acquire the RPMI mailbox channel, validate the platform's
 * spec and service-group identity/versions, then enumerate and
 * register every clock the platform reports.
 *
 * Returns 0 on success or a negative errno; all resources are
 * device-managed, so no explicit unwinding is needed.
 */
static int rpmi_clk_probe(struct platform_device *pdev)
{
	int ret;
	unsigned int num_clocks, i;
	struct clk_hw_onecell_data *clk_data;
	struct rpmi_clk_context *context;
	struct rpmi_mbox_message msg;
	struct clk_hw *hw_ptr;
	struct device *dev = &pdev->dev;

	context = devm_kzalloc(dev, sizeof(*context), GFP_KERNEL);
	if (!context)
		return -ENOMEM;
	context->dev = dev;
	platform_set_drvdata(pdev, context);

	/*
	 * Non-blocking sends with knows_txdone set.
	 * NOTE(review): presumably the RPMI message layer tracks tx
	 * completion itself — confirm against the mailbox controller.
	 */
	context->client.dev = context->dev;
	context->client.rx_callback = NULL;
	context->client.tx_block = false;
	context->client.knows_txdone = true;
	context->client.tx_tout = 0;

	context->chan = mbox_request_channel(&context->client, 0);
	if (IS_ERR(context->chan))
		return PTR_ERR(context->chan);

	/* Ensure the channel is freed whenever probe fails or the device goes. */
	ret = devm_add_action_or_reset(dev, rpmi_clk_mbox_chan_release, context->chan);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to add rpmi mbox channel cleanup\n");

	/* The driver requires at least RPMI spec v1.0. */
	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SPEC_VERSION);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get spec version\n");
	if (msg.attr.value < RPMI_MKVER(1, 0)) {
		return dev_err_probe(dev, -EINVAL,
				     "msg protocol version mismatch, expected 0x%x, found 0x%x\n",
				     RPMI_MKVER(1, 0), msg.attr.value);
	}

	/* The channel must belong to the clock service group. */
	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_ID);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get service group ID\n");
	if (msg.attr.value != RPMI_SRVGRP_CLOCK) {
		return dev_err_probe(dev, -EINVAL,
				     "service group match failed, expected 0x%x, found 0x%x\n",
				     RPMI_SRVGRP_CLOCK, msg.attr.value);
	}

	/* The clock service group itself must be at least v1.0. */
	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_SERVICEGROUP_VERSION);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get service group version\n");
	if (msg.attr.value < RPMI_MKVER(1, 0)) {
		return dev_err_probe(dev, -EINVAL,
				     "service group version failed, expected 0x%x, found 0x%x\n",
				     RPMI_MKVER(1, 0), msg.attr.value);
	}

	/* Needed to size the rx buffer for multi-message rate transfers. */
	rpmi_mbox_init_get_attribute(&msg, RPMI_MBOX_ATTR_MAX_MSG_DATA_SIZE);
	ret = rpmi_mbox_send_message(context->chan, &msg);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to get max message data size\n");

	context->max_msg_data_size = msg.attr.value;
	num_clocks = rpmi_clk_get_num_clocks(context);
	if (!num_clocks)
		return dev_err_probe(dev, -ENODEV, "No clocks found\n");

	clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, num_clocks),
				GFP_KERNEL);
	if (!clk_data)
		return dev_err_probe(dev, -ENOMEM, "No memory for clock data\n");
	clk_data->num = num_clocks;

	/* Clock IDs are taken to be 0..num_clocks-1 — assumed per RPMI convention. */
	for (i = 0; i < clk_data->num; i++) {
		hw_ptr = rpmi_clk_enumerate(context, i);
		if (IS_ERR(hw_ptr)) {
			return dev_err_probe(dev, PTR_ERR(hw_ptr),
					     "Failed to register clk-%d\n", i);
		}
		clk_data->hws[i] = hw_ptr;
	}

	ret = devm_of_clk_add_hw_provider(dev, of_clk_hw_onecell_get, clk_data);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register clock HW provider\n");

	return 0;
}
602
/* Device-tree match table. */
static const struct of_device_id rpmi_clk_of_match[] = {
	{ .compatible = "riscv,rpmi-clock" },
	{ }
};
MODULE_DEVICE_TABLE(of, rpmi_clk_of_match);

static struct platform_driver rpmi_clk_driver = {
	.driver = {
		.name = "riscv-rpmi-clock",
		.of_match_table = rpmi_clk_of_match,
	},
	.probe = rpmi_clk_probe,
};
module_platform_driver(rpmi_clk_driver);

MODULE_AUTHOR("Rahul Pathak <rpathak@ventanamicro.com>");
MODULE_DESCRIPTION("Clock Driver based on RPMI message protocol");
MODULE_LICENSE("GPL");
621