// SPDX-License-Identifier: GPL-2.0
/*
 * rockchip-rng.c True Random Number Generator driver for Rockchip SoCs
 *
 * Copyright (c) 2018, Fuzhou Rockchip Electronics Co., Ltd.
 * Copyright (c) 2022, Aurelien Jarno
 * Copyright (c) 2025, Collabora Ltd.
 * Authors:
 *   Lin Jinhan <troy.lin@rock-chips.com>
 *   Aurelien Jarno <aurelien@aurel32.net>
 *   Nicolas Frattaroli <nicolas.frattaroli@collabora.com>
 */
#include <linux/clk.h>
#include <linux/hw_random.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/slab.h>

#define RK_RNG_AUTOSUSPEND_DELAY	100
#define RK_RNG_MAX_BYTE			32
#define RK_RNG_POLL_PERIOD_US		100
#define RK_RNG_POLL_TIMEOUT_US		10000

/*
 * The TRNG samples the oscillator ring output once every RK_RNG_SAMPLE_CNT
 * clock cycles. The value is a trade-off between speed and quality and has
 * been tuned to yield a quality of ~900 (~87.5% of FIPS 140-2 tests passing).
 */
#define RK_RNG_SAMPLE_CNT	1000

/* after how many bytes of output TRNG v1 implementations should be reseeded */
#define RK_TRNG_V1_AUTO_RESEED_CNT	16000

/* TRNG registers from RK3568 TRM-Part2, section 5.4.1 */
#define TRNG_RST_CTL			0x0004
#define TRNG_RNG_CTL			0x0400
#define TRNG_RNG_CTL_LEN_64_BIT		(0x00 << 4)
#define TRNG_RNG_CTL_LEN_128_BIT	(0x01 << 4)
#define TRNG_RNG_CTL_LEN_192_BIT	(0x02 << 4)
#define TRNG_RNG_CTL_LEN_256_BIT	(0x03 << 4)
#define TRNG_RNG_CTL_OSC_RING_SPEED_0	(0x00 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_1	(0x01 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_2	(0x02 << 2)
#define TRNG_RNG_CTL_OSC_RING_SPEED_3	(0x03 << 2)
#define TRNG_RNG_CTL_MASK		GENMASK(15, 0)
#define TRNG_RNG_CTL_ENABLE		BIT(1)
#define TRNG_RNG_CTL_START		BIT(0)
#define TRNG_RNG_SAMPLE_CNT		0x0404
#define TRNG_RNG_DOUT			0x0410

/*
 * TRNG V1 register definitions
 * The TRNG V1 IP is a stand-alone TRNG implementation (not part of a crypto IP)
 * and can be found in the Rockchip RK3588 SoC
 */
#define TRNG_V1_CTRL			0x0000
#define TRNG_V1_CTRL_NOP		0x00
#define TRNG_V1_CTRL_RAND		0x01
#define TRNG_V1_CTRL_SEED		0x02

#define TRNG_V1_STAT			0x0004
#define TRNG_V1_STAT_SEEDED		BIT(9)
#define TRNG_V1_STAT_GENERATING		BIT(30)
#define TRNG_V1_STAT_RESEEDING		BIT(31)

#define TRNG_V1_MODE			0x0008
#define TRNG_V1_MODE_128_BIT		(0x00 << 3)
#define TRNG_V1_MODE_256_BIT		(0x01 << 3)

/* Interrupt Enable register; unused because polling is faster */
#define TRNG_V1_IE			0x0010
#define TRNG_V1_IE_GLBL_EN		BIT(31)
#define TRNG_V1_IE_SEED_DONE_EN		BIT(1)
#define TRNG_V1_IE_RAND_RDY_EN		BIT(0)

#define TRNG_V1_ISTAT			0x0014
#define TRNG_V1_ISTAT_RAND_RDY		BIT(0)

/* RAND0 ~ RAND7 */
#define TRNG_V1_RAND0			0x0020
#define TRNG_V1_RAND7			0x003C

/* Auto Reseed Register */
#define TRNG_V1_AUTO_RQSTS		0x0060

#define TRNG_V1_VERSION			0x00F0
#define TRNG_v1_VERSION_CODE		0x46bc
/* end of TRNG_V1 register definitions */

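/*
 * TRNG_V1_RAND0 through TRNG_V1_RAND7 are eight consecutive 32-bit output
 * registers, i.e. TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0 = 32 bytes per request.
 */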
/* Before removing this assert, give rk3588_rng_read an upper bound of 32 */
static_assert(RK_RNG_MAX_BYTE <= (TRNG_V1_RAND7 + 4 - TRNG_V1_RAND0),
	      "You raised RK_RNG_MAX_BYTE and broke rk3588-rng, congrats.");

struct rk_rng {
	struct hwrng rng;
	void __iomem *base;
	int clk_num;
	struct clk_bulk_data *clk_bulks;
	const struct rk_rng_soc_data *soc_data;
	struct device *dev;
};

struct rk_rng_soc_data {
	int (*rk_rng_init)(struct hwrng *rng);
	int (*rk_rng_read)(struct hwrng *rng, void *buf, size_t max, bool wait);
	void (*rk_rng_cleanup)(struct hwrng *rng);
	unsigned short quality;
	bool reset_optional;
};

/* The mask in the upper 16 bits determines the bits that are updated */
static void rk_rng_write_ctl(struct rk_rng *rng, u32 val, u32 mask)
{
	writel((mask << 16) | val, rng->base + TRNG_RNG_CTL);
}

static inline void rk_rng_writel(struct rk_rng *rng, u32 val, u32 offset)
{
	writel(val, rng->base + offset);
}

static inline u32 rk_rng_readl(struct rk_rng *rng, u32 offset)
{
	return readl(rng->base + offset);
}

static int rk_rng_enable_clks(struct rk_rng *rk_rng)
{
	int ret;

	/* start clocks */
	ret = clk_bulk_prepare_enable(rk_rng->clk_num, rk_rng->clk_bulks);
	if (ret < 0) {
		dev_err(rk_rng->dev, "Failed to enable clocks: %d\n", ret);
		return ret;
	}

	return 0;
}

static int rk3568_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	int ret;

	ret = rk_rng_enable_clks(rk_rng);
	if (ret < 0)
		return ret;

	/* set the sample period */
	writel(RK_RNG_SAMPLE_CNT, rk_rng->base + TRNG_RNG_SAMPLE_CNT);

	/* set osc ring speed and enable it */
	rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_LEN_256_BIT |
				 TRNG_RNG_CTL_OSC_RING_SPEED_0 |
				 TRNG_RNG_CTL_ENABLE,
			 TRNG_RNG_CTL_MASK);

	return 0;
}

static void rk3568_rng_cleanup(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	/* stop TRNG */
	rk_rng_write_ctl(rk_rng, 0, TRNG_RNG_CTL_MASK);

	/* stop clocks */
	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}

static int rk3568_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
	u32 reg;
	int ret = 0;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

	/* Start collecting random data */
	rk_rng_write_ctl(rk_rng, TRNG_RNG_CTL_START, TRNG_RNG_CTL_START);

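	/* the START bit clears once the data is ready in TRNG_RNG_DOUT */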
	ret = readl_poll_timeout(rk_rng->base + TRNG_RNG_CTL, reg,
				 !(reg & TRNG_RNG_CTL_START),
				 RK_RNG_POLL_PERIOD_US,
				 RK_RNG_POLL_TIMEOUT_US);
	if (ret < 0)
		goto out;

	/* Read random data stored in the registers */
	memcpy_fromio(buf, rk_rng->base + TRNG_RNG_DOUT, to_read);
out:
	pm_runtime_mark_last_busy(rk_rng->dev);
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static int rk3588_rng_init(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	u32 version, status, mask, istat;
	int ret;

	ret = rk_rng_enable_clks(rk_rng);
	if (ret < 0)
		return ret;

	version = rk_rng_readl(rk_rng, TRNG_V1_VERSION);
	if (version != TRNG_v1_VERSION_CODE) {
		dev_err(rk_rng->dev,
			"wrong trng version, expected = %08x, actual = %08x\n",
			TRNG_v1_VERSION_CODE, version);
		ret = -EFAULT;
		goto err_disable_clk;
	}

	mask = TRNG_V1_STAT_SEEDED | TRNG_V1_STAT_GENERATING |
	       TRNG_V1_STAT_RESEEDING;
	if (readl_poll_timeout(rk_rng->base + TRNG_V1_STAT, status,
			       (status & mask) == TRNG_V1_STAT_SEEDED,
			       RK_RNG_POLL_PERIOD_US, RK_RNG_POLL_TIMEOUT_US) < 0) {
		dev_err(rk_rng->dev, "timed out waiting for hwrng to reseed\n");
		ret = -ETIMEDOUT;
		goto err_disable_clk;
	}

	/*
	 * Clear the ISTAT flag; the downstream driver advises doing this to
	 * avoid an auto-reseed "on power on".
	 */
	istat = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
	rk_rng_writel(rk_rng, istat, TRNG_V1_ISTAT);

	/* auto reseed after RK_TRNG_V1_AUTO_RESEED_CNT bytes */
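	/*
	 * TRNG_V1_AUTO_RQSTS presumably counts generate requests in units of
	 * 16 bytes, hence the byte count below is divided by 16.
	 */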
	rk_rng_writel(rk_rng, RK_TRNG_V1_AUTO_RESEED_CNT / 16, TRNG_V1_AUTO_RQSTS);

	return 0;
err_disable_clk:
	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
	return ret;
}

static void rk3588_rng_cleanup(struct hwrng *rng)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);

	clk_bulk_disable_unprepare(rk_rng->clk_num, rk_rng->clk_bulks);
}

static int rk3588_rng_read(struct hwrng *rng, void *buf, size_t max, bool wait)
{
	struct rk_rng *rk_rng = container_of(rng, struct rk_rng, rng);
	size_t to_read = min_t(size_t, max, RK_RNG_MAX_BYTE);
	int ret = 0;
	u32 reg;

	ret = pm_runtime_resume_and_get(rk_rng->dev);
	if (ret < 0)
		return ret;

	/* Clear ISTAT; the hardware updates it even with interrupts disabled */
	reg = rk_rng_readl(rk_rng, TRNG_V1_ISTAT);
	rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);

	/* generate 256 bits of random data */
	rk_rng_writel(rk_rng, TRNG_V1_MODE_256_BIT, TRNG_V1_MODE);
	rk_rng_writel(rk_rng, TRNG_V1_CTRL_RAND, TRNG_V1_CTRL);

	ret = readl_poll_timeout_atomic(rk_rng->base + TRNG_V1_ISTAT, reg,
					(reg & TRNG_V1_ISTAT_RAND_RDY), 0,
					RK_RNG_POLL_TIMEOUT_US);
	if (ret < 0)
		goto out;

	/* Read random data that's in registers TRNG_V1_RAND0 through RAND7 */
	memcpy_fromio(buf, rk_rng->base + TRNG_V1_RAND0, to_read);

out:
	/* Clear ISTAT */
	rk_rng_writel(rk_rng, reg, TRNG_V1_ISTAT);
	/* close the TRNG */
	rk_rng_writel(rk_rng, TRNG_V1_CTRL_NOP, TRNG_V1_CTRL);

	pm_runtime_mark_last_busy(rk_rng->dev);
	pm_runtime_put_sync_autosuspend(rk_rng->dev);

	return (ret < 0) ? ret : to_read;
}

static const struct rk_rng_soc_data rk3568_soc_data = {
	.rk_rng_init = rk3568_rng_init,
	.rk_rng_read = rk3568_rng_read,
	.rk_rng_cleanup = rk3568_rng_cleanup,
	.quality = 900,
	.reset_optional = false,
};

static const struct rk_rng_soc_data rk3588_soc_data = {
	.rk_rng_init = rk3588_rng_init,
	.rk_rng_read = rk3588_rng_read,
	.rk_rng_cleanup = rk3588_rng_cleanup,
	.quality = 999, /* as determined by actual testing */
	.reset_optional = true,
};

static int rk_rng_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct reset_control *rst;
	struct rk_rng *rk_rng;
	int ret;

	rk_rng = devm_kzalloc(dev, sizeof(*rk_rng), GFP_KERNEL);
	if (!rk_rng)
		return -ENOMEM;

	rk_rng->soc_data = of_device_get_match_data(dev);
	rk_rng->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rk_rng->base))
		return PTR_ERR(rk_rng->base);

	rk_rng->clk_num = devm_clk_bulk_get_all(dev, &rk_rng->clk_bulks);
	if (rk_rng->clk_num < 0)
		return dev_err_probe(dev, rk_rng->clk_num,
				     "Failed to get clks property\n");

	if (rk_rng->soc_data->reset_optional)
		rst = devm_reset_control_array_get_optional_exclusive(dev);
	else
		rst = devm_reset_control_array_get_exclusive(dev);

	if (rst) {
		if (IS_ERR(rst))
			return dev_err_probe(dev, PTR_ERR(rst), "Failed to get reset property\n");

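		/* cycle the reset so the TRNG starts from a known state */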
		reset_control_assert(rst);
		udelay(2);
		reset_control_deassert(rst);
	}

	platform_set_drvdata(pdev, rk_rng);

	rk_rng->rng.name = dev_driver_string(dev);
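	/*
	 * With runtime PM enabled, rk_rng_runtime_resume()/_suspend() bring
	 * the TRNG up and down on demand; only hook init/cleanup up for the
	 * hwrng core when CONFIG_PM is disabled.
	 */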
	if (!IS_ENABLED(CONFIG_PM)) {
		rk_rng->rng.init = rk_rng->soc_data->rk_rng_init;
		rk_rng->rng.cleanup = rk_rng->soc_data->rk_rng_cleanup;
	}
	rk_rng->rng.read = rk_rng->soc_data->rk_rng_read;
	rk_rng->dev = dev;
	rk_rng->rng.quality = rk_rng->soc_data->quality;

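	/* power the TRNG down RK_RNG_AUTOSUSPEND_DELAY ms after the last use */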
	pm_runtime_set_autosuspend_delay(dev, RK_RNG_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);
	ret = devm_pm_runtime_enable(dev);
	if (ret)
		return dev_err_probe(dev, ret, "Runtime pm activation failed.\n");

	ret = devm_hwrng_register(dev, &rk_rng->rng);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to register Rockchip hwrng\n");

	return 0;
}

static int __maybe_unused rk_rng_runtime_suspend(struct device *dev)
{
	struct rk_rng *rk_rng = dev_get_drvdata(dev);

	rk_rng->soc_data->rk_rng_cleanup(&rk_rng->rng);

	return 0;
}

static int __maybe_unused rk_rng_runtime_resume(struct device *dev)
{
	struct rk_rng *rk_rng = dev_get_drvdata(dev);

	return rk_rng->soc_data->rk_rng_init(&rk_rng->rng);
}

static const struct dev_pm_ops rk_rng_pm_ops = {
	SET_RUNTIME_PM_OPS(rk_rng_runtime_suspend,
			   rk_rng_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};

static const struct of_device_id rk_rng_dt_match[] = {
	{ .compatible = "rockchip,rk3568-rng", .data = (void *)&rk3568_soc_data },
	{ .compatible = "rockchip,rk3588-rng", .data = (void *)&rk3588_soc_data },
	{ /* sentinel */ },
};

MODULE_DEVICE_TABLE(of, rk_rng_dt_match);

static struct platform_driver rk_rng_driver = {
	.driver = {
		.name = "rockchip-rng",
		.pm = &rk_rng_pm_ops,
		.of_match_table = rk_rng_dt_match,
	},
	.probe = rk_rng_probe,
};

module_platform_driver(rk_rng_driver);

MODULE_DESCRIPTION("Rockchip True Random Number Generator driver");
MODULE_AUTHOR("Lin Jinhan <troy.lin@rock-chips.com>");
MODULE_AUTHOR("Aurelien Jarno <aurelien@aurel32.net>");
MODULE_AUTHOR("Daniel Golle <daniel@makrotopia.org>");
MODULE_AUTHOR("Nicolas Frattaroli <nicolas.frattaroli@collabora.com>");
MODULE_LICENSE("GPL");