// SPDX-License-Identifier: GPL-2.0
/*
 * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs
 *
 * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
 * Copyright (c) 2022, Aurelien Jarno
 * Copyright (c) 2025, Collabora Ltd.
 * Authors:
 *  Lin Jinhan <troy.lin@rock-chips.com>
 *  Aurelien Jarno <aurelien@aurel32.net>
 *  Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
 */
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#define RK_RNG_AUTOSUSPEND_DELAY	100
#define RK_RNG_MAX_BYTE			32
#define RK_RNG_POLL_PERIOD_US		100
#define RK_RNG_POLL_TIMEOUT_US		10000
/*
 * The TRNG collects one bit of osc ring output every RK_RNG_SAMPLE_CNT clock
 * cycles. The value is a tradeoff between speed and quality and has been
 * tuned to reach a quality of ~900 (~87.5% of FIPS 140-2 tests passing).
 */
#define RK_RNG_SAMPLE_CNT		1000

/* number of output bytes after which TRNG V1 implementations are reseeded */
#define RK_TRNG_V1_AUTO_RESEED_CNT	16000

/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
#define TRNG_RST_CTL			0x0004
#define TRNG_RNG_CTL			0x0400
#define TRNG_RNG_CTL_LEN_64_BIT		(0x00 << 4)
#define TRNG_RNG_CTL_LEN_128_BIT	(0x01 << 4)
#define TRNG_RNG_CTL_LEN_192_BIT	(0x02 << 4)
#define TRNG_RNG_CTL_LEN_256_BIT	(0x03 << 4)
#define TRNG_RNG_CTL_OSC_RING_SPEED_0	(0x00 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_1	(0x01 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_2	(0x02 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_3	(0x03 << 2)
#define TRNG_RNG_CTL_MASK		GENMASK(15, 0)
#define TRNG_RNG_CTL_ENABLE		BIT(1)
#define TRNG_RNG_CTL_START		BIT(0)
#define TRNG_RNG_SAMPLE_CNT		0x0404
#define TRNG_RNG_DOUT			0x0410

/*
 * TRNG V1 register definitions
 * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP)
 * and can be found in the Rockchip RK3588 SoC
 */
#define TRNG_V1_CTRL				0x0000
#define TRNG_V1_CTRL_NOP			0x00
#define TRNG_V1_CTRL_RAND			0x01
#define TRNG_V1_CTRL_SEED			0x02

#define TRNG_V1_STAT				0x0004
#define TRNG_V1_STAT_SEEDED			BIT(9)
#define TRNG_V1_STAT_GENERATING			BIT(30)
#define TRNG_V1_STAT_RESEEDING			BIT(31)

#define TRNG_V1_MODE				0x0008
#define TRNG_V1_MODE_128_BIT			(0x00 << 3)
#define TRNG_V1_MODE_256_BIT			(0x01 << 3)

/* Interrupt Enable register; unused because polling is faster */
#define TRNG_V1_IE				0x0010
#define TRNG_V1_IE_GLBL_EN			BIT(31)
#define TRNG_V1_IE_SEED_DONE_EN			BIT(1)
#define TRNG_V1_IE_RAND_RDY_EN			BIT(0)

#define TRNG_V1_ISTAT				0x0014
#define TRNG_V1_ISTAT_RAND_RDY			BIT(0)

/* RAND0 ~ RAND7 */
#define TRNG_V1_RAND0				0x0020
#define TRNG_V1_RAND7				0x003C

/* Auto Reseed Register */
#define TRNG_V1_AUTO_RQSTS			0x0060

#define TRNG_V1_VERSION				0x00F0
#define TRNG_v1_VERSION_CODE			0x46bc
/* end of TRNG_V1 register definitions */

/*
 * RKRNG register definitions
 * The RKRNG IP is a stand-alone TRNG implementation (not part of a crypto IP)
 * and can be found in the Rockchip RK3576, Rockchip RK3562 and Rockchip RK3528
 * SoCs. It can either output true randomness (TRNG) or "deterministic"
 * randomness derived from hashing the true entropy (DRNG). This driver
 * implementation uses just the true entropy, and leaves stretching the entropy
 * up to Linux.
 */
#define RKRNG_CFG				0x0000
#define RKRNG_CTRL				0x0010
#define RKRNG_CTRL_REQ_TRNG			BIT(4)
#define RKRNG_STATE				0x0014
#define RKRNG_STATE_TRNG_RDY			BIT(4)
#define RKRNG_TRNG_DATA0			0x0050
#define RKRNG_TRNG_DATA1			0x0054
#define RKRNG_TRNG_DATA2			0x0058
#define RKRNG_TRNG_DATA3			0x005C
#define RKRNG_TRNG_DATA4			0x0060
#define RKRNG_TRNG_DATA5			0x0064
#define RKRNG_TRNG_DATA6			0x0068
#define RKRNG_TRNG_DATA7			0x006C
#define RKRNG_READ_LEN				32

/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
	      "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");

struct rk_rng {
	struct hwrng rng;
	void __iomem *base;
	int clk_num;
	struct clk_bulk_data *clk_bulks;
	const struct rk_rng_soc_data *soc_data;
	struct device *dev;
};

struct rk_rng_soc_data {
	int (*rk_rng_init)(struct hwrng *rng);
	int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait);
	void (*rk_rng_cleanup)(struct hwrng *rng);
	unsigned short quality;
	bool reset_optional;
};

/* The mask in the upper 16 bits determines the bits that are updated */
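/*
 * For example, rk_rng_write_ctl(rng, 0, TRNG_RNG_CTL_MASK) writes 0xffff0000,
 * i.e. "update all 16 control bits and set them to 0", which is how
 * rk3568_rng_cleanup() stops the TRNG.
 */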
static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
{
	writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
}

static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
{
	writel(val, rng->base + offset);
}

static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
{
	return readl(rng->base + offset);
}

static int rk_rng_enable_clks(struct rk_rng *rk_rng)
{
	int ret;
	/* start clocks */
	ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
	if (ret < 0) {
		dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
		return ret;
	}

	return 0;
}

static int rk3568_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	int ret;

	ret = rk_rng_enable_clks(rk_rng);
	if (ret < 0)
		return ret;

	/* set the sample period */
	writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);

	/* set osc ring speed and enable it */
	rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
				 TRNG_RNG_CTL_OSC_RING_SPEED_0 |
				 TRNG_RNG_CTL_ENABLE,
			 TRNG_RNG_CTL_MASK);

	return 0;
}

static void rk3568_rng_cleanup(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	/* stop TRNG */
	rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);

	/* stop clocks */
	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}

static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
	u32 reg;
	int ret = 0;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

	/* Start collecting random data */
	rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);

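	/*
	 * The hardware clears the START bit again once the requested random
	 * data has been gathered into the TRNG_RNG_DOUT registers.
	 */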
	ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
				 !(reg & TRNG_RNG_CTL_START),
				 RK_RNG_POLL_PERIOD_US,
				 RK_RNG_POLL_TIMEOUT_US);
	if (ret < 0)
		goto out;

	/* Read random data stored in the registers */
	memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
	pm_runtime_mark_last_busy(rk_rng->dev);
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static int rk3576_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	return rk_rng_enable_clks(rk_rng);
}

static int rk3576_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RKRNG_READ_LEN);
	int ret = 0;
	u32 val;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

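	/*
	 * Request a block of true random data. As with TRNG_RNG_CTL, the
	 * upper 16 bits appear to act as a write-enable mask for the lower
	 * 16 bits, hence REQ_TRNG is written to both halves.
	 */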
	rk_rng_writel(rk_rng, RKRNG_CTRL_REQ_TRNG | (RKRNG_CTRL_REQ_TRNG << 16),
		      RKRNG_CTRL);

	if (readl_poll_timeout(rk_rng->base + RKRNG_STATE, val,
			       (val & RKRNG_STATE_TRNG_RDY), RK_RNG_POLL_PERIOD_US,
			       RK_RNG_POLL_TIMEOUT_US)) {
		dev_err(rk_rng->dev, "timed out waiting for data\n");
		ret = -ETIMEDOUT;
		goto out;
	}

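	/* Acknowledge the ready flag, which appears to be write-1-to-clear */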
	rk_rng_writel(rk_rng, RKRNG_STATE_TRNG_RDY, RKRNG_STATE);

	memcpy_fromio(buf, rk_rng->base + RKRNG_TRNG_DATA0, to_read);

out:
	pm_runtime_mark_last_busy(rk_rng->dev);
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static int rk3588_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	u32 version, status, mask, istat;
	int ret;

	ret = rk_rng_enable_clks(rk_rng);
	if (ret < 0)
		return ret;

	version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
	if (version != TRNG_v1_VERSION_CODE) {
		dev_err(rk_rng->dev,
			"wrong trng version, expected = %08x, actual = %08x\n",
			TRNG_v1_VERSION_CODE, version);
		ret = -EFAULT;
		goto err_disable_clk;
	}

	mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING |
	       TRNG_V1_STAT_RESEEDING;
	if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
			       (status & mask) == TRNG_V1_STAT_SEEDED,
			       RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) {
		dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
		ret = -ETIMEDOUT;
		goto err_disable_clk;
	}

	/*
	 * Clear the ISTAT flags; the downstream driver advises doing this to
	 * avoid auto-reseeding "on power on".
	 */
	istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
	rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);

	/* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */
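	/*
	 * TRNG_V1_AUTO_RQSTS presumably counts generate requests of 16 bytes
	 * (128 bits) each, hence the division by 16.
	 */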
	rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);

	return 0;
err_disable_clk:
	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
	return ret;
}

static void rk3588_rng_cleanup(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}

static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
	int ret = 0;
	u32 reg;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

	/* Clear ISTAT; it is updated even with interrupts disabled */
	reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
	rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);

	/* generate 256 bits of random data */
	rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
	rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);

	ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
					(reg & TRNG_V1_ISTAT_RAND_RDY), 0,
					RK_RNG_POLL_TIMEOUT_US);
	if (ret < 0)
		goto out;

	/* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */
	memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);

out:
	/* Clear ISTAT */
	rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
	/* close the TRNG */
	rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);

	pm_runtime_mark_last_busy(rk_rng->dev);
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static const struct rk_rng_soc_data rk3568_soc_data = {
	.rk_rng_init = rk3568_rng_init,
	.rk_rng_read = rk3568_rng_read,
	.rk_rng_cleanup = rk3568_rng_cleanup,
	.quality = 900,
	.reset_optional = false,
};

static const struct rk_rng_soc_data rk3576_soc_data = {
	.rk_rng_init = rk3576_rng_init,
	.rk_rng_read = rk3576_rng_read,
	.rk_rng_cleanup = rk3588_rng_cleanup,
	.quality = 999,		/* as determined by actual testing */
	.reset_optional = true,
};

static const struct rk_rng_soc_data rk3588_soc_data = {
	.rk_rng_init = rk3588_rng_init,
	.rk_rng_read = rk3588_rng_read,
	.rk_rng_cleanup = rk3588_rng_cleanup,
	.quality = 999,		/* as determined by actual testing */
	.reset_optional = true,
};

static int rk_rng_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *rst;
	struct rk_rng *rk_rng;
	int ret;

	rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
	if (!rk_rng)
		return -ENOMEM;

	rk_rng->soc_data = of_device_get_match_data(dev);
	rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rk_rng->base))
		return PTR_ERR(rk_rng->base);

	rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
	if (rk_rng->clk_num < 0)
		return dev_err_probe(dev, rk_rng->clk_num,
				     "Failed to get clks property\n");

	if (rk_rng->soc_data->reset_optional)
		rst = devm_reset_control_array_get_optional_exclusive(dev);
	else
		rst = devm_reset_control_array_get_exclusive(dev);

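	/*
	 * If a reset line was provided, pulse it to bring the TRNG into a
	 * known state before registering it.
	 */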
	if (rst) {
		if (IS_ERR(rst))
			return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");

		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	platform_set_drvdata(pdev, rk_rng);

	rk_rng->rng.name = dev_driver_string(dev);
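	/*
	 * With runtime PM, init/cleanup are driven by the runtime suspend and
	 * resume callbacks below instead of being hooked into hwrng directly.
	 */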
	if (!IS_ENABLED(CONFIG_PM)) {
		rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
		rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
	}
	rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
	rk_rng->dev = dev;
	rk_rng->rng.quality = rk_rng->soc_data->quality;

	pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");

	ret = devm_hwrng_register(dev, &rk_rng->rng);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n");

	return 0;
}

static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
{
	struct rk_rng *rk_rng = dev_get_drvdata(dev);

	rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);

	return 0;
}

static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
{
	struct rk_rng *rk_rng = dev_get_drvdata(dev);

	return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);
}

static const struct dev_pm_ops rk_rng_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend,
				rk_rng_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_rng_dt_match[] = {
	{ .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
	{ .compatible = "rockchip,rk3576-rng", .data = (void *)&rk3576_soc_data },
	{ .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
	{ /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, rk_rng_dt_match);

static struct platform_driver rk_rng_driver = {
	.driver	= {
		.name	= "rockchip-rng",
		.pm	= &rk_rng_pm_ops,
		.of_match_table = rk_rng_dt_match,
	},
	.probe	= rk_rng_probe,
};

module_platform_driver(rk_rng_driver);

MODULE_DESCRIPTION("Rockchip True Random Number Generator driver");
MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
MODULE_LICENSE("GPL");