Lines matching "shared-memory" in drivers/tee/tee_shm.c

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2015-2017, 2019-2021 Linaro Limited
35 if (shm->pages) { in release_registered_pages()
36 if (shm->flags & TEE_SHM_USER_MAPPED) in release_registered_pages()
37 unpin_user_pages(shm->pages, shm->num_pages); in release_registered_pages()
39 shm_put_kernel_pages(shm->pages, shm->num_pages); in release_registered_pages()
41 kfree(shm->pages); in release_registered_pages()
47 if (shm->flags & TEE_SHM_POOL) { in tee_shm_release()
48 teedev->pool->ops->free(teedev->pool, shm); in tee_shm_release()
49 } else if (shm->flags & TEE_SHM_DYNAMIC) { in tee_shm_release()
50 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm); in tee_shm_release()
53 dev_err(teedev->dev.parent, in tee_shm_release()
59 teedev_ctx_put(shm->ctx); in tee_shm_release()
69 struct tee_device *teedev = ctx->teedev; in shm_alloc_helper()
75 return ERR_PTR(-EINVAL); in shm_alloc_helper()
77 if (!teedev->pool) { in shm_alloc_helper()
79 ret = ERR_PTR(-EINVAL); in shm_alloc_helper()
85 ret = ERR_PTR(-ENOMEM); in shm_alloc_helper()
89 refcount_set(&shm->refcount, 1); in shm_alloc_helper()
90 shm->flags = flags; in shm_alloc_helper()
91 shm->id = id; in shm_alloc_helper()
96 * to call teedev_ctx_get() or clear shm->ctx in case it's not in shm_alloc_helper()
99 shm->ctx = ctx; in shm_alloc_helper()
101 rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align); in shm_alloc_helper()
117 * tee_shm_alloc_user_buf() - Allocate shared memory for user space
118 * @ctx: Context that allocates the shared memory
119 * @size: Requested size of shared memory
121 * Memory allocated as user space shared memory is automatically freed when
124 * memory.
131 struct tee_device *teedev = ctx->teedev; in tee_shm_alloc_user_buf()
136 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
137 id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); in tee_shm_alloc_user_buf()
138 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
144 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
145 idr_remove(&teedev->idr, id); in tee_shm_alloc_user_buf()
146 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
150 mutex_lock(&teedev->mutex); in tee_shm_alloc_user_buf()
151 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_alloc_user_buf()
152 mutex_unlock(&teedev->mutex); in tee_shm_alloc_user_buf()
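
tee_shm_alloc_user_buf() backs the TEE_IOC_SHM_ALLOC ioctl. The following is a hedged user-space sketch of that path, not part of this file; the /dev/tee0 node name is an assumption (OP-TEE style) and error handling is trimmed.

/* Hedged user-space sketch: allocate TEE shared memory through the tee
 * ioctl interface and map it. Assumes /dev/tee0 and <linux/tee.h>. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

int alloc_and_map_shm(size_t size)
{
	struct tee_ioctl_shm_alloc_data data = { .size = size };
	int devfd, shmfd;
	void *p;

	devfd = open("/dev/tee0", O_RDWR);
	if (devfd < 0)
		return -1;

	/* On success an fd representing the shared memory object is returned */
	shmfd = ioctl(devfd, TEE_IOC_SHM_ALLOC, &data);
	if (shmfd < 0) {
		close(devfd);
		return -1;
	}

	/* tee_shm_fop_mmap() in the kernel services this mmap() */
	p = mmap(NULL, data.size, PROT_READ | PROT_WRITE, MAP_SHARED, shmfd, 0);
	if (p == MAP_FAILED) {
		close(shmfd);
		close(devfd);
		return -1;
	}

	printf("shm id %d mapped at %p\n", data.id, p);
	munmap(p, data.size);
	close(shmfd);
	close(devfd);
	return 0;
}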
162 * tee_shm_alloc_kernel_buf() - Allocate shared memory for kernel buffer
163 * @ctx: Context that allocates the shared memory
164 * @size: Requested size of shared memory
166 * The returned memory is registered in secure world and is suitable to be
167 * passed as a memory buffer in a parameter argument to
168 * tee_client_invoke_func(). The memory allocated is later freed with a call to tee_shm_free().
177 return shm_alloc_helper(ctx, size, PAGE_SIZE, flags, -1); in tee_shm_alloc_kernel_buf()
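
A minimal kernel-side sketch of the allocate/use/free lifecycle follows. It is illustrative only and not part of tee_shm.c; the struct tee_context is assumed to come from tee_client_open_context() elsewhere.

/* Hedged sketch: allocate registered shared memory from a TEE client
 * driver, fill it through its kernel mapping, and free it again. */
#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

static int fill_shm_example(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc_kernel_buf(ctx, 4096);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return PTR_ERR(va);
	}
	memset(va, 0, 4096);

	/* ... pass "shm" as a memref parameter to tee_client_invoke_func() ... */

	tee_shm_free(shm);
	return 0;
}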
182 * tee_shm_alloc_priv_buf() - Allocate shared memory for a privately shared kernel buffer
184 * @ctx: Context that allocates the shared memory
185 * @size: Requested size of shared memory
187 * This function returns shared memory similar to
188 * tee_shm_alloc_kernel_buf(), but with the difference that the memory
189 * might not be registered in secure world in case the driver supports
190 * passing memory not registered in advance.
201 return shm_alloc_helper(ctx, size, sizeof(long) * 2, flags, -1); in tee_shm_alloc_priv_buf()
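
A short, illustrative sketch of a driver-private allocation (typically used by TEE drivers for their own message/argument buffers). struct my_msg is a hypothetical placeholder, and the declaring header is assumed to be <linux/tee_drv.h>; the exact header may vary by kernel version.

/* Hedged sketch: driver-private shared buffer, not exported to user space. */
#include <linux/err.h>
#include <linux/tee_drv.h>

struct my_msg {			/* hypothetical driver message layout */
	u32 cmd;
	u32 arg;
};

static struct tee_shm *alloc_msg_buf(struct tee_context *ctx)
{
	return tee_shm_alloc_priv_buf(ctx, sizeof(struct my_msg));
}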
209 struct tee_device *teedev = ctx->teedev; in register_shm_helper()
218 return ERR_PTR(-EINVAL); in register_shm_helper()
220 if (!teedev->desc->ops->shm_register || in register_shm_helper()
221 !teedev->desc->ops->shm_unregister) { in register_shm_helper()
222 ret = ERR_PTR(-ENOTSUPP); in register_shm_helper()
230 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
234 refcount_set(&shm->refcount, 1); in register_shm_helper()
235 shm->flags = flags; in register_shm_helper()
236 shm->ctx = ctx; in register_shm_helper()
237 shm->id = id; in register_shm_helper()
242 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
246 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL); in register_shm_helper()
247 if (!shm->pages) { in register_shm_helper()
248 ret = ERR_PTR(-ENOMEM); in register_shm_helper()
252 len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0, in register_shm_helper()
255 ret = len ? ERR_PTR(len) : ERR_PTR(-ENOMEM); in register_shm_helper()
264 shm_get_kernel_pages(shm->pages, num_pages); in register_shm_helper()
266 shm->offset = off; in register_shm_helper()
267 shm->size = len; in register_shm_helper()
268 shm->num_pages = num_pages; in register_shm_helper()
270 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages, in register_shm_helper()
271 shm->num_pages, start); in register_shm_helper()
280 unpin_user_pages(shm->pages, shm->num_pages); in register_shm_helper()
282 shm_put_kernel_pages(shm->pages, shm->num_pages); in register_shm_helper()
284 kfree(shm->pages); in register_shm_helper()
295 * tee_shm_register_user_buf() - Register a userspace shared memory buffer
296 * @ctx: Context that registers the shared memory
297 * @addr: The userspace address of the shared buffer
298 * @length: Length of the shared buffer
306 struct tee_device *teedev = ctx->teedev; in tee_shm_register_user_buf()
313 return ERR_PTR(-EFAULT); in tee_shm_register_user_buf()
315 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
316 id = idr_alloc(&teedev->idr, NULL, 1, 0, GFP_KERNEL); in tee_shm_register_user_buf()
317 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
324 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
325 idr_remove(&teedev->idr, id); in tee_shm_register_user_buf()
326 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
330 mutex_lock(&teedev->mutex); in tee_shm_register_user_buf()
331 ret = idr_replace(&teedev->idr, shm, id); in tee_shm_register_user_buf()
332 mutex_unlock(&teedev->mutex); in tee_shm_register_user_buf()
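
tee_shm_register_user_buf() backs the TEE_IOC_SHM_REGISTER ioctl. Below is a hedged user-space sketch, not from this file; error handling is trimmed.

/* Hedged user-space sketch: register an existing user buffer as TEE
 * shared memory. Assumes <linux/tee.h> and an already-open device fd. */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/tee.h>

int register_user_buf(int devfd, void *buf, size_t len)
{
	struct tee_ioctl_shm_register_data data = {
		.addr = (uintptr_t)buf,
		.length = len,
	};
	int shmfd;

	/* On success an fd representing the registered buffer is returned
	 * and data.id holds the shared memory object id. */
	shmfd = ioctl(devfd, TEE_IOC_SHM_REGISTER, &data);
	if (shmfd < 0)
		return -1;

	return shmfd;
}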
342 * tee_shm_register_kernel_buf() - Register kernel memory to be shared with secure world
344 * @ctx: Context that registers the shared memory
362 return register_shm_helper(ctx, &iter, flags, -1); in tee_shm_register_kernel_buf()
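
A minimal kernel-side sketch of registering an existing kernel buffer follows; it is illustrative, not part of this file, and assumes a struct tee_context obtained from tee_client_open_context() elsewhere.

/* Hedged sketch: register a kmalloc'd buffer so it can be passed by
 * reference to secure world, then drop the registration again. */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>

static int register_kbuf_example(struct tee_context *ctx)
{
	struct tee_shm *shm;
	void *buf;

	buf = kzalloc(4096, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	shm = tee_shm_register_kernel_buf(ctx, buf, 4096);
	if (IS_ERR(shm)) {
		kfree(buf);
		return PTR_ERR(shm);
	}

	/* ... use "shm" as a memref parameter ... */

	tee_shm_free(shm);	/* unregisters; the buffer itself is still ours */
	kfree(buf);
	return 0;
}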
368 tee_shm_put(filp->private_data); in tee_shm_fop_release()
374 struct tee_shm *shm = filp->private_data; in tee_shm_fop_mmap()
375 size_t size = vma->vm_end - vma->vm_start; in tee_shm_fop_mmap()
377 /* Refuse to share shared memory provided by the application */ in tee_shm_fop_mmap()
378 if (shm->flags & TEE_SHM_USER_MAPPED) in tee_shm_fop_mmap()
379 return -EINVAL; in tee_shm_fop_mmap()
382 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT) in tee_shm_fop_mmap()
383 return -EINVAL; in tee_shm_fop_mmap()
385 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT, in tee_shm_fop_mmap()
386 size, vma->vm_page_prot); in tee_shm_fop_mmap()
396 * tee_shm_get_fd() - Increase reference count and return file descriptor
397 * @shm: Shared memory handle
398 * @returns user space file descriptor to shared memory
404 if (shm->id < 0) in tee_shm_get_fd()
405 return -EINVAL; in tee_shm_get_fd()
408 refcount_inc(&shm->refcount); in tee_shm_get_fd()
416 * tee_shm_free() - Free shared memory
417 * @shm: Handle to shared memory to free
426 * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
427 * @shm: Shared memory handle
428 * @offs: Offset from start of this shared memory
429 * @returns virtual address of the shared memory + offs if offs is within
430 * the bounds of this shared memory, else an ERR_PTR
434 if (!shm->kaddr) in tee_shm_get_va()
435 return ERR_PTR(-EINVAL); in tee_shm_get_va()
436 if (offs >= shm->size) in tee_shm_get_va()
437 return ERR_PTR(-EINVAL); in tee_shm_get_va()
438 return (char *)shm->kaddr + offs; in tee_shm_get_va()
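
A hedged sketch combining tee_shm_get_va() with an invocation, passing the buffer as an in/out memref. It is illustrative only: the session handle and the MY_TA_CMD command id are hypothetical, and the context is assumed to come from tee_client_open_context().

/* Hedged sketch: copy a request into shared memory via its kernel mapping
 * and hand it to a trusted application as an in/out memref parameter. */
#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>

#define MY_TA_CMD	0x1	/* hypothetical TA command id */

static int invoke_with_shm(struct tee_context *ctx, u32 session,
			   const void *req, size_t req_len)
{
	struct tee_ioctl_invoke_arg arg = { };
	struct tee_param param = { };
	struct tee_shm *shm;
	void *va;
	int rc;

	shm = tee_shm_alloc_kernel_buf(ctx, req_len);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		rc = PTR_ERR(va);
		goto out;
	}
	memcpy(va, req, req_len);

	arg.func = MY_TA_CMD;
	arg.session = session;
	arg.num_params = 1;

	param.attr = TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT;
	param.u.memref.shm = shm;
	param.u.memref.size = req_len;

	rc = tee_client_invoke_func(ctx, &arg, &param);
	if (!rc && arg.ret)
		rc = -EIO;	/* TA returned an error */
out:
	tee_shm_free(shm);
	return rc;
}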
443 * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
444 * @shm: Shared memory handle
445 * @offs: Offset from start of this shared memory
447 * @returns 0 if offs is within the bounds of this shared memory, else an
452 if (offs >= shm->size) in tee_shm_get_pa()
453 return -EINVAL; in tee_shm_get_pa()
455 *pa = shm->paddr + offs; in tee_shm_get_pa()
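
A short, illustrative sketch of tee_shm_get_pa(); the physical address is typically only meaningful for pool-allocated (physically contiguous) buffers.

/* Hedged sketch: look up the physical address of an offset inside a
 * shared memory object, for cases that need a raw physical address. */
#include <linux/printk.h>
#include <linux/tee_drv.h>
#include <linux/types.h>

static int shm_phys_example(struct tee_shm *shm, size_t offs)
{
	phys_addr_t pa;
	int rc;

	rc = tee_shm_get_pa(shm, offs, &pa);
	if (rc)
		return rc;	/* offs was outside the buffer */

	pr_info("shm offset %zu is at %pa\n", offs, &pa);
	return 0;
}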
461 * tee_shm_get_from_id() - Find shared memory object and increase reference count
463 * @ctx: Context owning the shared memory
464 * @id: Id of shared memory object
473 return ERR_PTR(-EINVAL); in tee_shm_get_from_id()
475 teedev = ctx->teedev; in tee_shm_get_from_id()
476 mutex_lock(&teedev->mutex); in tee_shm_get_from_id()
477 shm = idr_find(&teedev->idr, id); in tee_shm_get_from_id()
483 if (!shm || shm->ctx != ctx) in tee_shm_get_from_id()
484 shm = ERR_PTR(-EINVAL); in tee_shm_get_from_id()
486 refcount_inc(&shm->refcount); in tee_shm_get_from_id()
487 mutex_unlock(&teedev->mutex); in tee_shm_get_from_id()
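
A short sketch of the lookup/put pairing with tee_shm_put() below; illustrative only, not from this file.

/* Hedged sketch: resolve a shared memory id to an object, use it briefly,
 * and drop the reference taken by the lookup. */
#include <linux/err.h>
#include <linux/tee_drv.h>

static int use_shm_by_id(struct tee_context *ctx, int id)
{
	struct tee_shm *shm;

	shm = tee_shm_get_from_id(ctx, id);	/* takes a reference */
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	/* ... access the buffer while the reference is held ... */

	tee_shm_put(shm);			/* drop the reference again */
	return 0;
}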
493 * tee_shm_put() - Decrease reference count on a shared memory handle
494 * @shm: Shared memory handle
498 struct tee_device *teedev = shm->ctx->teedev; in tee_shm_put()
501 mutex_lock(&teedev->mutex); in tee_shm_put()
502 if (refcount_dec_and_test(&shm->refcount)) { in tee_shm_put()
509 if (shm->id >= 0) in tee_shm_put()
510 idr_remove(&teedev->idr, shm->id); in tee_shm_put()
513 mutex_unlock(&teedev->mutex); in tee_shm_put()