// SPDX-License-Identifier: GPL-2.0
/*
 * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs
 *
 * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
 * Copyright (c) 2022, Aurelien Jarno
 * Copyright (c) 2025, Collabora Ltd.
 * Authors:
 *  Lin Jinhan <troy.lin@rock-chips.com>
 *  Aurelien Jarno <aurelien@aurel32.net>
 *  Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
 */
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#define RK_RNG_AUTOSUSPEND_DELAY	100
#define RK_RNG_MAX_BYTE			32
#define RK_RNG_POLL_PERIOD_US		100
#define RK_RNG_POLL_TIMEOUT_US		10000

/*
 * TRNG collects osc ring output bit every RK_RNG_SAMPLE_CNT time. The value is
 * a tradeoff between speed and quality and has been adjusted to get a quality
 * of ~900 (~87.5% of FIPS 140-2 successes).
 */
#define RK_RNG_SAMPLE_CNT		1000

/* after how many bytes of output TRNGv1 implementations should be reseeded */
#define RK_TRNG_V1_AUTO_RESEED_CNT	16000

/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
#define TRNG_RST_CTL			0x0004
#define TRNG_RNG_CTL			0x0400
#define TRNG_RNG_CTL_LEN_64_BIT		(0x00 << 4)
#define TRNG_RNG_CTL_LEN_128_BIT	(0x01 << 4)
#define TRNG_RNG_CTL_LEN_192_BIT	(0x02 << 4)
#define TRNG_RNG_CTL_LEN_256_BIT	(0x03 << 4)
#define TRNG_RNG_CTL_OSC_RING_SPEED_0	(0x00 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_1	(0x01 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_2	(0x02 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_3	(0x03 << 2)
#define TRNG_RNG_CTL_MASK		GENMASK(15, 0)
#define TRNG_RNG_CTL_ENABLE		BIT(1)
#define TRNG_RNG_CTL_START		BIT(0)
#define TRNG_RNG_SAMPLE_CNT		0x0404
#define TRNG_RNG_DOUT			0x0410

/*
 * TRNG V1 register definitions
 * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP)
 * and can be found in the Rockchip RK3588 SoC
 */
#define TRNG_V1_CTRL			0x0000
#define TRNG_V1_CTRL_NOP		0x00
#define TRNG_V1_CTRL_RAND		0x01
#define TRNG_V1_CTRL_SEED		0x02

#define TRNG_V1_STAT			0x0004
#define TRNG_V1_STAT_SEEDED		BIT(9)
#define TRNG_V1_STAT_GENERATING		BIT(30)
#define TRNG_V1_STAT_RESEEDING		BIT(31)

#define TRNG_V1_MODE			0x0008
#define TRNG_V1_MODE_128_BIT		(0x00 << 3)
#define TRNG_V1_MODE_256_BIT		(0x01 << 3)

/* Interrupt Enable register; unused because polling is faster */
#define TRNG_V1_IE			0x0010
#define TRNG_V1_IE_GLBL_EN		BIT(31)
#define TRNG_V1_IE_SEED_DONE_EN		BIT(1)
#define TRNG_V1_IE_RAND_RDY_EN		BIT(0)

#define TRNG_V1_ISTAT			0x0014
#define TRNG_V1_ISTAT_RAND_RDY		BIT(0)

/* RAND0 ~ RAND7 */
#define TRNG_V1_RAND0			0x0020
#define TRNG_V1_RAND7			0x003C

/* Auto Reseed Register */
#define TRNG_V1_AUTO_RQSTS		0x0060

#define TRNG_V1_VERSION			0x00F0
#define TRNG_v1_VERSION_CODE		0x46bc
/* end of TRNG_V1 register definitions */

/*
 * RKRNG register definitions
 * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
 * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
 * SoCs. It can either output true randomness (TRNG) or "deterministic"
 * randomness derived from hashing the true entropy (DRNG). This driver
 * implementation uses just the true entropy, and leaves stretching the entropy
 * up to Linux.
 */
#define RKRNG_CFG			0x0000
#define RKRNG_CTRL			0x0010
#define RKRNG_CTRL_REQ_TRNG		BIT(4)
#define RKRNG_STATE			0x0014
#define RKRNG_STATE_TRNG_RDY		BIT(4)
#define RKRNG_TRNG_DATA0		0x0050
#define RKRNG_TRNG_DATA1		0x0054
#define RKRNG_TRNG_DATA2		0x0058
#define RKRNG_TRNG_DATA3		0x005C
#define RKRNG_TRNG_DATA4		0x0060
#define RKRNG_TRNG_DATA5		0x0064
#define RKRNG_TRNG_DATA6		0x0068
#define RKRNG_TRNG_DATA7		0x006C
#define RKRNG_READ_LEN			32

/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
	      "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");

struct rk_rng {
	struct hwrng rng;
	void __iomem *base;
	int clk_num;
	struct clk_bulk_data *clk_bulks;
	const struct rk_rng_soc_data *soc_data;
	struct device *dev;
};

struct rk_rng_soc_data {
	int (*rk_rng_init)(struct hwrng *rng);
	int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait);
	void (*rk_rng_cleanup)(struct hwrng *rng);
	unsigned short quality;
	bool reset_optional;
};

/* The mask in the upper 16 bits determines the bits that are updated */
static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
{
	writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
}

static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
{
	writel(val, rng->base + offset);
}

static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
{
	return readl(rng->base + offset);
}

static int rk_rng_enable_clks(struct rk_rng *rk_rng)
{
	int ret;
	/* start clocks */
	ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
	if (ret < 0) {
		dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
		return ret;
	}

	return 0;
}

static int rk3568_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	int ret;

	ret = rk_rng_enable_clks(rk_rng);
	if (ret < 0)
		return ret;

	/* set the sample period */
	writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);

	/* set osc ring speed and enable it */
	rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
				 TRNG_RNG_CTL_OSC_RING_SPEED_0 |
				 TRNG_RNG_CTL_ENABLE,
			 TRNG_RNG_CTL_MASK);

	return 0;
}

static void rk3568_rng_cleanup(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	/* stop TRNG */
	rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);

	/* stop clocks */
	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}

static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
	u32 reg;
	int ret = 0;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

	/* Start collecting random data */
	rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);

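	/* Wait for the hardware to clear the START bit, i.e. for output to be ready */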
	ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
				 !(reg & TRNG_RNG_CTL_START),
				 RK_RNG_POLL_PERIOD_US,
				 RK_RNG_POLL_TIMEOUT_US);
	if (ret < 0)
		goto out;

	/* Read random data stored in the registers */
	memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static int rk3576_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	return rk_rng_enable_clks(rk_rng);
}

static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
	int ret = 0;
	u32 val;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

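	/*
	 * Request a block of true random data. The bit mirrored into the upper
	 * 16 bits appears to act as a write-enable mask, as with TRNG_RNG_CTL.
	 */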
	rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
		      RKRNG_CTRL);

	if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
			       (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
			       RK_RNG_POLL_TIMEOUT_US)) {
		dev_err(rk_rng->dev, "timed out waiting for data\n");
		ret = -ETIMEDOUT;
		goto out;
	}

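	/* Ack the ready flag (presumably write-1-to-clear) before reading data */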
	rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);

	memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);

out:
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static int rk3588_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	u32 version, status, mask, istat;
	int ret;

	ret = rk_rng_enable_clks(rk_rng);
	if (ret < 0)
		return ret;

	version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
	if (version != TRNG_v1_VERSION_CODE) {
		dev_err(rk_rng->dev,
			"wrong trng version, expected = %08x, actual = %08x\n",
			TRNG_v1_VERSION_CODE, version);
		ret = -EFAULT;
		goto err_disable_clk;
	}

	mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING |
	       TRNG_V1_STAT_RESEEDING;
	if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
			       (status & mask) == TRNG_V1_STAT_SEEDED,
			       RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) {
		dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
		ret = -ETIMEDOUT;
		goto err_disable_clk;
	}

	/*
	 * clear ISTAT flag, downstream advises to do this to avoid
	 * auto-reseeding "on power on"
	 */
	istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
	rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);

	/* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */
	rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);

	return 0;
err_disable_clk:
	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
	return ret;
}

static void rk3588_rng_cleanup(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}

static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
	int ret = 0;
	u32 reg;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

	/* Clear ISTAT; even without interrupts enabled, this will be updated */
	reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
	rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);

	/* generate 256 bits of random data */
	rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
	rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);

	ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
					(reg & TRNG_V1_ISTAT_RAND_RDY), 0,
					RK_RNG_POLL_TIMEOUT_US);
	if (ret < 0)
		goto out;

	/* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */
	memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);

out:
	/* Clear ISTAT */
	rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
	/* close the TRNG */
	rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);

	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static const struct rk_rng_soc_data rk3568_soc_data = {
	.rk_rng_init = rk3568_rng_init,
	.rk_rng_read = rk3568_rng_read,
	.rk_rng_cleanup = rk3568_rng_cleanup,
	.quality = 900,
	.reset_optional = false,
};

static const struct rk_rng_soc_data rk3576_soc_data = {
	.rk_rng_init = rk3576_rng_init,
	.rk_rng_read = rk3576_rng_read,
	.rk_rng_cleanup = rk3588_rng_cleanup,
	.quality = 999, /* as determined by actual testing */
	.reset_optional = true,
};

static const struct rk_rng_soc_data rk3588_soc_data = {
	.rk_rng_init = rk3588_rng_init,
	.rk_rng_read = rk3588_rng_read,
	.rk_rng_cleanup = rk3588_rng_cleanup,
	.quality = 999, /* as determined by actual testing */
	.reset_optional = true,
};

static int rk_rng_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *rst;
	struct rk_rng *rk_rng;
	int ret;

	rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
	if (!rk_rng)
		return -ENOMEM;

	rk_rng->soc_data = of_device_get_match_data(dev);
	rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rk_rng->base))
		return PTR_ERR(rk_rng->base);

	rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
	if (rk_rng->clk_num < 0)
		return dev_err_probe(dev, rk_rng->clk_num,
				     "Failed to get clks property\n");

	if (rk_rng->soc_data->reset_optional)
		rst = devm_reset_control_array_get_optional_exclusive(dev);
	else
		rst = devm_reset_control_array_get_exclusive(dev);

	if (rst) {
		if (IS_ERR(rst))
			return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");

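		/* Pulse the reset to bring the block into a known state */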
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	platform_set_drvdata(pdev, rk_rng);

	rk_rng->rng.name = dev_driver_string(dev);
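	/*
	 * Without runtime PM, the hwrng init/cleanup hooks bring the block up
	 * and down; with CONFIG_PM, the runtime PM callbacks below do this on
	 * demand instead.
	 */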
	if (!IS_ENABLED(CONFIG_PM)) {
		rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
		rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
	}
	rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
	rk_rng->dev = dev;
	rk_rng->rng.quality = rk_rng->soc_data->quality;

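	/* Autosuspend keeps the block powered only briefly after each read */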
	pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");

	ret = devm_hwrng_register(dev, &rk_rng->rng);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n");

	return 0;
}

static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
{
	struct rk_rng *rk_rng = dev_get_drvdata(dev);

	rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);

	return 0;
}

static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
{
	struct rk_rng *rk_rng = dev_get_drvdata(dev);

	return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);
}

static const struct dev_pm_ops rk_rng_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend,
			   rk_rng_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_rng_dt_match[] = {
	{ .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
	{ .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
	{ .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
	{ /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, rk_rng_dt_match);

static struct platform_driver rk_rng_driver = {
	.driver	= {
		.name	= "rockchip-rng",
		.pm	= &rk_rng_pm_ops,
		.of_match_table = rk_rng_dt_match,
	},
	.probe	= rk_rng_probe,
};

module_platform_driver(rk_rng_driver);

MODULE_DESCRIPTION("Rockchip True Random Number Generator driver");
MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
MODULE_LICENSE("GPL");