// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Generic on-chip SRAM allocation driver
 *
 * Copyright (C) 2012 Philipp Zabel, Pengutronix
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/list_sort.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/mfd/syscon.h>
#include <soc/at91/atmel-secumod.h>

#include "sram.h"

#define SRAM_GRANULARITY	32

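/*
 * sysfs read handler for an exported partition: copy @count bytes at
 * offset @pos out of the partition's MMIO mapping into @buf, serialized
 * against concurrent writers by the partition lock.
 */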
static ssize_t sram_read(struct file *filp, struct kobject *kobj,
			 const struct bin_attribute *attr,
			 char *buf, loff_t pos, size_t count)
{
	struct sram_partition *part;

	/* Cast away the const as the attribute is part of a larger structure */
	part = (struct sram_partition *)container_of(attr, struct sram_partition, battr);

	mutex_lock(&part->lock);
	memcpy_fromio(buf, part->base + pos, count);
	mutex_unlock(&part->lock);

	return count;
}

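/*
 * sysfs write handler for an exported partition: the mirror image of
 * sram_read(), copying @count bytes from @buf into the MMIO mapping.
 */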
static ssize_t sram_write(struct file *filp, struct kobject *kobj,
			  const struct bin_attribute *attr,
			  char *buf, loff_t pos, size_t count)
{
	struct sram_partition *part;

	/* Cast away the const as the attribute is part of a larger structure */
	part = (struct sram_partition *)container_of(attr, struct sram_partition, battr);

	mutex_lock(&part->lock);
	memcpy_toio(part->base + pos, buf, count);
	mutex_unlock(&part->lock);

	return count;
}

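/*
 * Create a dedicated gen_pool for a reserved block, named after the
 * block's label, and seed it with the block's virtual/physical range so
 * drivers can allocate from the partition via gen_pool lookup.
 */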
static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
			 phys_addr_t start, struct sram_partition *part)
{
	int ret;

	part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
					  NUMA_NO_NODE, block->label);
	if (IS_ERR(part->pool))
		return PTR_ERR(part->pool);

	ret = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
				block->size, NUMA_NO_NODE);
	if (ret < 0) {
		dev_err(sram->dev, "failed to register subpool: %d\n", ret);
		return ret;
	}

	return 0;
}

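/*
 * Expose a reserved block to userspace as a root-only read/write sysfs
 * binary attribute named "<physical address>.sram".
 */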
static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
			   phys_addr_t start, struct sram_partition *part)
{
	sysfs_bin_attr_init(&part->battr);
	part->battr.attr.name = devm_kasprintf(sram->dev, GFP_KERNEL,
					       "%llx.sram",
					       (unsigned long long)start);
	if (!part->battr.attr.name)
		return -ENOMEM;

	part->battr.attr.mode = S_IRUSR | S_IWUSR;
	part->battr.read = sram_read;
	part->battr.write = sram_write;
	part->battr.size = block->size;

	return device_create_bin_file(sram->dev, &part->battr);
}

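/*
 * Set up the next free sram_partition slot for a reserved block: map it
 * separately if the config restricts mapping to reserved areas, then
 * create its pool, sysfs export and/or protect-exec pool as requested by
 * the device tree properties.
 */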
static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
			      phys_addr_t start)
{
	int ret;
	struct sram_partition *part = &sram->partition[sram->partitions];

	mutex_init(&part->lock);

	if (sram->config && sram->config->map_only_reserved) {
		void __iomem *virt_base;

		if (sram->no_memory_wc)
			virt_base = devm_ioremap_resource(sram->dev, &block->res);
		else
			virt_base = devm_ioremap_resource_wc(sram->dev, &block->res);

		if (IS_ERR(virt_base)) {
			dev_err(sram->dev, "could not map SRAM at %pr\n", &block->res);
			return PTR_ERR(virt_base);
		}

		part->base = virt_base;
	} else {
		part->base = sram->virt_base + block->start;
	}

	if (block->pool) {
		ret = sram_add_pool(sram, block, start, part);
		if (ret)
			return ret;
	}
	if (block->export) {
		ret = sram_add_export(sram, block, start, part);
		if (ret)
			return ret;
	}
	if (block->protect_exec) {
		ret = sram_check_protect_exec(sram, block, part);
		if (ret)
			return ret;

		ret = sram_add_pool(sram, block, start, part);
		if (ret)
			return ret;

		sram_add_protect_exec(part);
	}

	sram->partitions++;

	return 0;
}

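/*
 * Tear down all partitions in reverse creation order, removing their
 * sysfs files and warning if a pool still has outstanding allocations.
 */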
static void sram_free_partitions(struct sram_dev *sram)
{
	struct sram_partition *part;

	if (!sram->partitions)
		return;

	part = &sram->partition[sram->partitions - 1];
	for (; sram->partitions; sram->partitions--, part--) {
		if (part->battr.size)
			device_remove_bin_file(sram->dev, &part->battr);

		if (part->pool &&
		    gen_pool_avail(part->pool) < gen_pool_size(part->pool))
			dev_err(sram->dev, "removed pool while SRAM allocated\n");
	}
}

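/* list_sort() comparator: order reserved blocks by ascending start offset */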
static int sram_reserve_cmp(void *priv, const struct list_head *a,
			    const struct list_head *b)
{
	const struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
	const struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);

	return ra->start - rb->start;
}

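/*
 * Parse the reserved blocks from the device tree children, validate that
 * they fall inside the SRAM resource and do not overlap, create
 * partitions for exported/pooled/protect-exec blocks, and feed the
 * remaining gaps into the device-wide gen_pool.
 */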
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
	struct device_node *np = sram->dev->of_node, *child;
	unsigned long size, cur_start, cur_size;
	struct sram_reserve *rblocks, *block;
	struct list_head reserve_list;
	unsigned int nblocks, exports = 0;
	const char *label;
	int ret = 0;

	INIT_LIST_HEAD(&reserve_list);

	size = resource_size(res);

	/*
	 * We need an additional block to mark the end of the memory region
	 * after the reserved blocks from the dt are processed.
	 */
	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
	rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL);
	if (!rblocks)
		return -ENOMEM;

	block = &rblocks[0];
	for_each_available_child_of_node(np, child) {
		struct resource child_res;

		ret = of_address_to_resource(child, 0, &child_res);
		if (ret < 0) {
			dev_err(sram->dev,
				"could not get address for node %pOF\n",
				child);
			goto err_chunks;
		}

		if (child_res.start < res->start || child_res.end > res->end) {
			dev_err(sram->dev,
				"reserved block %pOF outside the sram area\n",
				child);
			ret = -EINVAL;
			goto err_chunks;
		}

		block->start = child_res.start - res->start;
		block->size = resource_size(&child_res);
		block->res = child_res;
		list_add_tail(&block->list, &reserve_list);

		block->export = of_property_read_bool(child, "export");
		block->pool = of_property_read_bool(child, "pool");
		block->protect_exec = of_property_read_bool(child, "protect-exec");

		if ((block->export || block->pool || block->protect_exec) &&
		    block->size) {
			exports++;

			label = NULL;
			ret = of_property_read_string(child, "label", &label);
			if (ret && ret != -EINVAL) {
				dev_err(sram->dev,
					"%pOF has invalid label name\n",
					child);
				goto err_chunks;
			}
			if (!label)
				block->label = devm_kasprintf(sram->dev, GFP_KERNEL,
							      "%s", of_node_full_name(child));
			else
				block->label = devm_kstrdup(sram->dev,
							    label, GFP_KERNEL);
			if (!block->label) {
				ret = -ENOMEM;
				goto err_chunks;
			}

			dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
				block->export ? "exported " : "", block->label,
				block->start, block->start + block->size);
		} else {
			dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
				block->start, block->start + block->size);
		}

		block++;
	}
	child = NULL;

	/* the last chunk marks the end of the region */
	rblocks[nblocks - 1].start = size;
	rblocks[nblocks - 1].size = 0;
	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

	list_sort(NULL, &reserve_list, sram_reserve_cmp);

	if (exports) {
		sram->partition = devm_kcalloc(sram->dev,
					       exports, sizeof(*sram->partition),
					       GFP_KERNEL);
		if (!sram->partition) {
			ret = -ENOMEM;
			goto err_chunks;
		}
	}

	cur_start = 0;
	list_for_each_entry(block, &reserve_list, list) {
		/* can only happen if sections overlap */
		if (block->start < cur_start) {
			dev_err(sram->dev,
				"block at 0x%x starts before current offset 0x%lx\n",
				block->start, cur_start);
			ret = -EINVAL;
			sram_free_partitions(sram);
			goto err_chunks;
		}

		if ((block->export || block->pool || block->protect_exec) &&
		    block->size) {
			ret = sram_add_partition(sram, block,
						 res->start + block->start);
			if (ret) {
				sram_free_partitions(sram);
				goto err_chunks;
			}
		}

		/* current start is in a reserved block, so continue after it */
		if (block->start == cur_start) {
			cur_start = block->start + block->size;
			continue;
		}

		/*
		 * allocate the space between the current starting
		 * address and the following reserved block, or the
		 * end of the region.
		 */
		cur_size = block->start - cur_start;

		if (sram->pool) {
			dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
				cur_start, cur_start + cur_size);

			ret = gen_pool_add_virt(sram->pool,
					(unsigned long)sram->virt_base + cur_start,
					res->start + cur_start, cur_size, -1);
			if (ret < 0) {
				sram_free_partitions(sram);
				goto err_chunks;
			}
		}

		/* next allocation after this reserved block */
		cur_start = block->start + block->size;
	}

err_chunks:
	of_node_put(child);
	kfree(rblocks);

	return ret;
}

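/*
 * Wait for the SAMA5D2 security module to signal that the SECURAM has
 * finished its built-in initialization, polling the RAMRDY bit via
 * syscon for up to 500 ms.
 */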
static int atmel_securam_wait(void)
{
	struct regmap *regmap;
	u32 val;

	regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
	if (IS_ERR(regmap))
		return -ENODEV;

	return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
					val & AT91_SECUMOD_RAMRDY_READY,
					10000, 500000);
}

static const struct sram_config atmel_securam_config = {
	.init = atmel_securam_wait,
};

/*
 * SYSRAM contains areas that are not accessible by the
 * kernel, such as the first 256K that is reserved for TZ.
 * Accesses to those areas (including speculative accesses)
 * trigger SErrors. As such we must map only the areas of
 * SYSRAM specified in the device tree.
 */
static const struct sram_config tegra_sysram_config = {
	.map_only_reserved = true,
};

static const struct of_device_id sram_dt_ids[] = {
	{ .compatible = "mmio-sram" },
	{ .compatible = "atmel,sama5d2-securam", .data = &atmel_securam_config },
	{ .compatible = "nvidia,tegra186-sysram", .data = &tegra_sysram_config },
	{ .compatible = "nvidia,tegra194-sysram", .data = &tegra_sysram_config },
	{ .compatible = "nvidia,tegra234-sysram", .data = &tegra_sysram_config },
	{}
};

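/*
 * Map the whole SRAM (unless the config restricts mapping to reserved
 * areas) and create the device-wide pool, enable an optional clock,
 * carve up the region according to the device tree, then run any
 * config-specific init hook.
 */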
static int sram_probe(struct platform_device *pdev)
{
	const struct sram_config *config;
	struct sram_dev *sram;
	int ret;
	struct resource *res;
	struct clk *clk;

	config = of_device_get_match_data(&pdev->dev);

	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->dev = &pdev->dev;
	sram->no_memory_wc = of_property_read_bool(pdev->dev.of_node, "no-memory-wc");
	sram->config = config;

	if (!config || !config->map_only_reserved) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
		if (sram->no_memory_wc)
			sram->virt_base = devm_ioremap_resource(&pdev->dev, res);
		else
			sram->virt_base = devm_ioremap_resource_wc(&pdev->dev, res);
		if (IS_ERR(sram->virt_base)) {
			dev_err(&pdev->dev, "could not map SRAM registers\n");
			return PTR_ERR(sram->virt_base);
		}

		sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
						  NUMA_NO_NODE, NULL);
		if (IS_ERR(sram->pool))
			return PTR_ERR(sram->pool);
	}

	clk = devm_clk_get_optional_enabled(sram->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = sram_reserve_regions(sram,
				   platform_get_resource(pdev, IORESOURCE_MEM, 0));
	if (ret)
		return ret;

	platform_set_drvdata(pdev, sram);

	if (config && config->init) {
		ret = config->init();
		if (ret)
			goto err_free_partitions;
	}

	if (sram->pool)
		dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
			gen_pool_size(sram->pool) / 1024, sram->virt_base);

	return 0;

err_free_partitions:
	sram_free_partitions(sram);

	return ret;
}

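/*
 * Undo partition setup on removal; the mappings, clock and pools are
 * devres-managed, so only warn if allocations are still outstanding.
 */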
static void sram_remove(struct platform_device *pdev)
{
	struct sram_dev *sram = platform_get_drvdata(pdev);

	sram_free_partitions(sram);

	if (sram->pool && gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
		dev_err(sram->dev, "removed while SRAM allocated\n");
}

static struct platform_driver sram_driver = {
	.driver = {
		.name = "sram",
		.of_match_table = sram_dt_ids,
	},
	.probe = sram_probe,
	.remove = sram_remove,
};

static int __init sram_init(void)
{
	return platform_driver_register(&sram_driver);
}

postcore_initcall(sram_init);