// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory allocator for buffers shared with the TrustZone.
 *
 * Copyright (C) 2023-2024 Linaro Ltd.
 */

#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/firmware/qcom/qcom_tzmem.h>
#include <linux/genalloc.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include "qcom_scm.h"
#include "qcom_tzmem.h"

/* A single DMA-coherent area backing (part of) a pool. */
struct qcom_tzmem_area {
	struct list_head list;
	void *vaddr;
	dma_addr_t paddr;
	size_t size;
	void *priv;
};

/* A pool is a gen_pool backed by one or more areas, grown per its policy. */
struct qcom_tzmem_pool {
	struct gen_pool *genpool;
	struct list_head areas;
	enum qcom_tzmem_policy policy;
	size_t increment;
	size_t max_size;
	spinlock_t lock;
};

/* Tracks a single allocation; indexed by virtual address in a radix tree. */
struct qcom_tzmem_chunk {
	size_t size;
	struct qcom_tzmem_pool *owner;
};

static struct device *qcom_tzmem_dev;
static RADIX_TREE(qcom_tzmem_chunks, GFP_ATOMIC);
static DEFINE_SPINLOCK(qcom_tzmem_chunks_lock);

#if IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_GENERIC)

/* The generic mode needs no per-area setup or teardown. */

static int qcom_tzmem_init(void)
{
	return 0;
}

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{

}

#elif IS_ENABLED(CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE)

#include <linux/firmware/qcom/qcom_scm.h>
#include <linux/of.h>

#define QCOM_SHM_BRIDGE_NUM_VM_SHIFT	9

static bool qcom_tzmem_using_shm_bridge;

/* List of machines that are known not to support SHM bridge correctly. */
static const char *const qcom_tzmem_blacklist[] = {
	"qcom,sc7180",	/* hang in rmtfs memory assignment */
	"qcom,sc8180x",
	"qcom,sdm670",	/* failure in GPU firmware loading */
	"qcom,sdm845",	/* reset in rmtfs memory assignment */
	"qcom,sm7150",	/* reset in rmtfs memory assignment */
	"qcom,sm8150",	/* reset in rmtfs memory assignment */
	NULL
};

static int qcom_tzmem_init(void)
{
	const char *const *platform;
	int ret;

	for (platform = qcom_tzmem_blacklist; *platform; platform++) {
		if (of_machine_is_compatible(*platform))
			goto notsupp;
	}

	ret = qcom_scm_shm_bridge_enable(qcom_tzmem_dev);
	if (ret == -EOPNOTSUPP)
		goto notsupp;

	if (!ret)
		qcom_tzmem_using_shm_bridge = true;

	return ret;

notsupp:
	dev_info(qcom_tzmem_dev, "SHM Bridge not supported\n");
	return 0;
}

/**
 * qcom_tzmem_shm_bridge_create() - Create a SHM bridge.
 * @paddr: Physical address of the memory to share.
 * @size: Size of the memory to share.
 * @handle: Handle to the SHM bridge.
 *
 * On platforms that support SHM bridge, this function creates a SHM bridge
 * for the given memory region with QTEE. The handle returned by this function
 * must be passed to qcom_tzmem_shm_bridge_delete() to free the SHM bridge.
 *
 * Return: 0 on success, negative error code on failure.
 */
int qcom_tzmem_shm_bridge_create(phys_addr_t paddr, size_t size, u64 *handle)
{
	u64 pfn_and_ns_perm, ipfn_and_s_perm, size_and_flags;
	int ret;

	if (!qcom_tzmem_using_shm_bridge)
		return 0;

	pfn_and_ns_perm = paddr | QCOM_SCM_PERM_RW;
	ipfn_and_s_perm = paddr | QCOM_SCM_PERM_RW;
	size_and_flags = size | (1 << QCOM_SHM_BRIDGE_NUM_VM_SHIFT);

	ret = qcom_scm_shm_bridge_create(pfn_and_ns_perm, ipfn_and_s_perm,
					 size_and_flags, QCOM_SCM_VMID_HLOS,
					 handle);
	if (ret) {
		/* %pa already prints the 0x prefix. */
		dev_err(qcom_tzmem_dev,
			"SHM Bridge create failed: ret %d paddr %pa, size %zu\n",
			ret, &paddr, size);

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_create);

/**
 * qcom_tzmem_shm_bridge_delete() - Delete a SHM bridge.
 * @handle: Handle to the SHM bridge.
 *
 * On platforms that support SHM bridge, this function deletes the SHM bridge
 * for the given memory region. The handle must be the same as the one
 * returned by qcom_tzmem_shm_bridge_create().
 */
void qcom_tzmem_shm_bridge_delete(u64 handle)
{
	if (qcom_tzmem_using_shm_bridge)
		qcom_scm_shm_bridge_delete(handle);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_shm_bridge_delete);
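
/*
 * A minimal usage sketch for the create/delete pair above. Illustrative
 * only: the origin of @paddr and @size (e.g. a DMA-coherent buffer) is an
 * assumption, not something this API prescribes.
 *
 *	u64 handle;
 *	int ret;
 *
 *	ret = qcom_tzmem_shm_bridge_create(paddr, size, &handle);
 *	if (ret)
 *		return ret;
 *
 *	(let the TrustZone access the region)
 *
 *	qcom_tzmem_shm_bridge_delete(handle);
 */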

static int qcom_tzmem_init_area(struct qcom_tzmem_area *area)
{
	int ret;

	u64 *handle __free(kfree) = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	ret = qcom_tzmem_shm_bridge_create(area->paddr, area->size, handle);
	if (ret)
		return ret;

	area->priv = no_free_ptr(handle);

	return 0;
}

static void qcom_tzmem_cleanup_area(struct qcom_tzmem_area *area)
{
	u64 *handle = area->priv;

	qcom_tzmem_shm_bridge_delete(*handle);
	kfree(handle);
}

#endif /* CONFIG_QCOM_TZMEM_MODE_SHMBRIDGE */

static int qcom_tzmem_pool_add_memory(struct qcom_tzmem_pool *pool,
				      size_t size, gfp_t gfp)
{
	int ret;

	struct qcom_tzmem_area *area __free(kfree) = kzalloc(sizeof(*area),
							     gfp);
	if (!area)
		return -ENOMEM;

	area->size = PAGE_ALIGN(size);

	area->vaddr = dma_alloc_coherent(qcom_tzmem_dev, area->size,
					 &area->paddr, gfp);
	if (!area->vaddr)
		return -ENOMEM;

	ret = qcom_tzmem_init_area(area);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	ret = gen_pool_add_virt(pool->genpool, (unsigned long)area->vaddr,
				(phys_addr_t)area->paddr, size, -1);
	if (ret) {
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		return ret;
	}

	scoped_guard(spinlock_irqsave, &pool->lock)
		list_add_tail(&area->list, &pool->areas);

	/* Ownership passed to the pool: disarm the __free() cleanup. */
	area = NULL;
	return 0;
}

/**
 * qcom_tzmem_pool_new() - Create a new TZ memory pool.
 * @config: Pool configuration.
 *
 * Create a new pool of memory suitable for sharing with the TrustZone.
 *
 * Must not be used in atomic context.
 *
 * Return: New memory pool address or ERR_PTR() on error.
 */
struct qcom_tzmem_pool *
qcom_tzmem_pool_new(const struct qcom_tzmem_pool_config *config)
{
	int ret = -ENOMEM;

	might_sleep();

	switch (config->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		if (!config->initial_size)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		if (!config->increment)
			return ERR_PTR(-EINVAL);
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		break;
	default:
		return ERR_PTR(-EINVAL);
	}

	struct qcom_tzmem_pool *pool __free(kfree) = kzalloc(sizeof(*pool),
							     GFP_KERNEL);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	pool->genpool = gen_pool_create(PAGE_SHIFT, -1);
	if (!pool->genpool)
		return ERR_PTR(-ENOMEM);

	gen_pool_set_algo(pool->genpool, gen_pool_best_fit, NULL);

	pool->policy = config->policy;
	pool->increment = config->increment;
	pool->max_size = config->max_size;
	INIT_LIST_HEAD(&pool->areas);
	spin_lock_init(&pool->lock);

	if (config->initial_size) {
		ret = qcom_tzmem_pool_add_memory(pool, config->initial_size,
						 GFP_KERNEL);
		if (ret) {
			gen_pool_destroy(pool->genpool);
			return ERR_PTR(ret);
		}
	}

	return_ptr(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_new);
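
/*
 * A minimal sketch of creating a growable pool. The policy and size values
 * are illustrative assumptions, not requirements.
 *
 *	struct qcom_tzmem_pool_config config = {
 *		.policy = QCOM_TZMEM_POLICY_ON_DEMAND,
 *		.max_size = SZ_256K,
 *	};
 *	struct qcom_tzmem_pool *pool;
 *
 *	pool = qcom_tzmem_pool_new(&config);
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */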

/**
 * qcom_tzmem_pool_free() - Destroy a TZ memory pool and free all resources.
 * @pool: Memory pool to free.
 *
 * Must not be called before all chunks allocated from this pool have been
 * freed.
 *
 * Must not be used in atomic context.
 */
void qcom_tzmem_pool_free(struct qcom_tzmem_pool *pool)
{
	struct qcom_tzmem_area *area, *next;
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	bool non_empty = false;
	void __rcu **slot;

	might_sleep();

	if (!pool)
		return;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
			chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

			if (chunk->owner == pool)
				non_empty = true;
		}
	}

	WARN(non_empty, "Freeing TZ memory pool with memory still allocated");

	list_for_each_entry_safe(area, next, &pool->areas, list) {
		list_del(&area->list);
		qcom_tzmem_cleanup_area(area);
		dma_free_coherent(qcom_tzmem_dev, area->size,
				  area->vaddr, area->paddr);
		kfree(area);
	}

	gen_pool_destroy(pool->genpool);
	kfree(pool);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_pool_free);

static void devm_qcom_tzmem_pool_free(void *data)
{
	struct qcom_tzmem_pool *pool = data;

	qcom_tzmem_pool_free(pool);
}

/**
 * devm_qcom_tzmem_pool_new() - Managed variant of qcom_tzmem_pool_new().
 * @dev: Device managing this resource.
 * @config: Pool configuration.
 *
 * Must not be used in atomic context.
 *
 * Return: Address of the managed pool or ERR_PTR() on failure.
 */
struct qcom_tzmem_pool *
devm_qcom_tzmem_pool_new(struct device *dev,
			 const struct qcom_tzmem_pool_config *config)
{
	struct qcom_tzmem_pool *pool;
	int ret;

	pool = qcom_tzmem_pool_new(config);
	if (IS_ERR(pool))
		return pool;

	ret = devm_add_action_or_reset(dev, devm_qcom_tzmem_pool_free, pool);
	if (ret)
		return ERR_PTR(ret);

	return pool;
}
EXPORT_SYMBOL_GPL(devm_qcom_tzmem_pool_new);
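
/*
 * A sketch of the managed variant from a hypothetical consumer's probe()
 * callback (foo_probe() and its config values are assumptions); the pool
 * is torn down automatically when the device is detached.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct qcom_tzmem_pool_config config = {
 *			.policy = QCOM_TZMEM_POLICY_STATIC,
 *			.initial_size = SZ_64K,
 *		};
 *		struct qcom_tzmem_pool *pool;
 *
 *		pool = devm_qcom_tzmem_pool_new(&pdev->dev, &config);
 *		if (IS_ERR(pool))
 *			return PTR_ERR(pool);
 *		...
 *	}
 */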

static bool qcom_tzmem_try_grow_pool(struct qcom_tzmem_pool *pool,
				     size_t requested, gfp_t gfp)
{
	size_t current_size = gen_pool_size(pool->genpool);

	if (pool->max_size && (current_size + requested) > pool->max_size)
		return false;

	switch (pool->policy) {
	case QCOM_TZMEM_POLICY_STATIC:
		return false;
	case QCOM_TZMEM_POLICY_MULTIPLIER:
		/* Grow by a multiple of the current pool size. */
		requested = current_size * pool->increment;
		break;
	case QCOM_TZMEM_POLICY_ON_DEMAND:
		/* Grow by exactly the amount requested. */
		break;
	}

	return !qcom_tzmem_pool_add_memory(pool, requested, gfp);
}

/**
 * qcom_tzmem_alloc() - Allocate a memory chunk suitable for sharing with TZ.
 * @pool: TZ memory pool from which to allocate memory.
 * @size: Number of bytes to allocate.
 * @gfp: GFP flags.
 *
 * Can be used in any context.
 *
 * Return:
 * Address of the allocated buffer or NULL if no more memory can be allocated.
 * The buffer must be released using qcom_tzmem_free().
 */
void *qcom_tzmem_alloc(struct qcom_tzmem_pool *pool, size_t size, gfp_t gfp)
{
	unsigned long vaddr;
	int ret;

	if (!size)
		return NULL;

	size = PAGE_ALIGN(size);

	struct qcom_tzmem_chunk *chunk __free(kfree) = kzalloc(sizeof(*chunk),
							       gfp);
	if (!chunk)
		return NULL;

again:
	vaddr = gen_pool_alloc(pool->genpool, size);
	if (!vaddr) {
		if (qcom_tzmem_try_grow_pool(pool, size, gfp))
			goto again;

		return NULL;
	}

	chunk->size = size;
	chunk->owner = pool;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock) {
		ret = radix_tree_insert(&qcom_tzmem_chunks, vaddr, chunk);
		if (ret) {
			gen_pool_free(pool->genpool, vaddr, size);
			return NULL;
		}

		chunk = NULL;
	}

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_alloc);
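
/*
 * A minimal alloc/free round trip, assuming a pool created as in the
 * examples above:
 *
 *	void *buf;
 *
 *	buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	(fill the buffer and pass it to the TrustZone)
 *
 *	qcom_tzmem_free(buf);
 */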

/**
 * qcom_tzmem_free() - Release a buffer allocated from a TZ memory pool.
 * @vaddr: Virtual address of the buffer.
 *
 * Can be used in any context.
 */
void qcom_tzmem_free(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;

	scoped_guard(spinlock_irqsave, &qcom_tzmem_chunks_lock)
		chunk = radix_tree_delete_item(&qcom_tzmem_chunks,
					       (unsigned long)vaddr, NULL);

	if (!chunk) {
		WARN(1, "Virtual address %p not owned by TZ memory allocator",
		     vaddr);
		return;
	}

	scoped_guard(spinlock_irqsave, &chunk->owner->lock)
		gen_pool_free(chunk->owner->genpool, (unsigned long)vaddr,
			      chunk->size);
	kfree(chunk);
}
EXPORT_SYMBOL_GPL(qcom_tzmem_free);

/**
 * qcom_tzmem_to_phys() - Map the virtual address of TZ memory to physical.
 * @vaddr: Virtual address of memory allocated from a TZ memory pool.
 *
 * Can be used in any context. The address must point to memory allocated
 * using qcom_tzmem_alloc().
 *
 * Return:
 * Physical address mapped from the virtual one or 0 if the mapping failed.
 */
phys_addr_t qcom_tzmem_to_phys(void *vaddr)
{
	struct qcom_tzmem_chunk *chunk;
	struct radix_tree_iter iter;
	void __rcu **slot;
	phys_addr_t ret;

	guard(spinlock_irqsave)(&qcom_tzmem_chunks_lock);

	radix_tree_for_each_slot(slot, &qcom_tzmem_chunks, &iter, 0) {
		chunk = radix_tree_deref_slot_protected(slot,
						&qcom_tzmem_chunks_lock);

		ret = gen_pool_virt_to_phys(chunk->owner->genpool,
					    (unsigned long)vaddr);
		if (ret == -1)
			continue;

		return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(qcom_tzmem_to_phys);
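
/*
 * Illustrative only: a typical pattern is to allocate a buffer from a pool
 * and hand its physical address to a firmware call.
 *
 *	void *buf = qcom_tzmem_alloc(pool, SZ_4K, GFP_KERNEL);
 *	phys_addr_t phys;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *
 *	phys = qcom_tzmem_to_phys(buf);
 *	(pass phys to the TrustZone call)
 */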

/**
 * qcom_tzmem_enable() - Enable the TZ memory allocator for a device.
 * @dev: Device that will back the DMA allocations of the TZ memory areas.
 *
 * Can only be called once.
 *
 * Return: 0 on success, negative error code on failure.
 */
int qcom_tzmem_enable(struct device *dev)
{
	if (qcom_tzmem_dev)
		return -EBUSY;

	qcom_tzmem_dev = dev;

	return qcom_tzmem_init();
}
EXPORT_SYMBOL_GPL(qcom_tzmem_enable);
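
/*
 * A sketch of the one-time setup call, assuming it is made by the
 * platform's firmware (SCM) driver with the device that will own the DMA
 * allocations:
 *
 *	ret = qcom_tzmem_enable(dev);
 *	if (ret)
 *		return ret;
 */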

MODULE_DESCRIPTION("TrustZone memory allocator for Qualcomm firmware drivers");
MODULE_AUTHOR("Bartosz Golaszewski <bartosz.golaszewski@linaro.org>");
MODULE_LICENSE("GPL");