Lines Matching defs:shm (drivers/tee/tee_shm.c)

34 static void release_registered_pages(struct tee_shm *shm)
36 if (shm->pages) {
37 if (shm->flags & TEE_SHM_USER_MAPPED)
38 unpin_user_pages(shm->pages, shm->num_pages);
40 shm_put_kernel_pages(shm->pages, shm->num_pages);
42 kfree(shm->pages);
46 static void tee_shm_release(struct tee_device *teedev, struct tee_shm *shm)
48 if (shm->flags & TEE_SHM_POOL) {
49 teedev->pool->ops->free(teedev->pool, shm);
50 } else if (shm->flags & TEE_SHM_DYNAMIC) {
51 int rc = teedev->desc->ops->shm_unregister(shm->ctx, shm);
55 "unregister shm %p failed: %d", shm, rc);
57 release_registered_pages(shm);
60 teedev_ctx_put(shm->ctx);
62 kfree(shm);
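
For pool-backed buffers (TEE_SHM_POOL) the release path above hands the memory back through teedev->pool->ops->free(). As a rough illustration of where such a pool comes from, here is a minimal sketch of a backend driver wrapping a static carveout with tee_shm_pool_alloc_res_mem(); res_va/res_pa/res_size and example_static_pool() are hypothetical names, and the exact header and minimum-allocation order vary by driver and kernel version.

#include <linux/err.h>
#include <linux/mm.h>		/* PAGE_SHIFT */
#include <linux/tee_core.h>	/* tee_shm_pool_alloc_res_mem(); <linux/tee_drv.h> on older kernels */

/*
 * Sketch: wrap a reserved-memory carveout (already mapped at res_va,
 * physically at res_pa, res_size bytes long) in a tee_shm_pool.
 * Buffers allocated from it are the ones tee_shm_release() later
 * returns via pool->ops->free().
 */
static struct tee_shm_pool *example_static_pool(void *res_va,
						phys_addr_t res_pa,
						size_t res_size)
{
	/* Page-granular allocations here; real drivers may pick a finer order. */
	return tee_shm_pool_alloc_res_mem((unsigned long)res_va, res_pa,
					  res_size, PAGE_SHIFT);
}
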
71 struct tee_shm *shm;
84 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
85 if (!shm) {
90 refcount_set(&shm->refcount, 1);
91 shm->flags = flags;
92 shm->id = id;
95 * We're assigning this as it is needed if the shm is to be
97 * to call teedev_ctx_get() or clear shm->ctx in case it's not
100 shm->ctx = ctx;
102 rc = teedev->pool->ops->alloc(teedev->pool, shm, size, align);
109 return shm;
111 kfree(shm);
133 struct tee_shm *shm;
143 shm = shm_alloc_helper(ctx, size, PAGE_SIZE, flags, id);
144 if (IS_ERR(shm)) {
148 return shm;
152 ret = idr_replace(&teedev->idr, shm, id);
155 tee_shm_free(shm);
159 return shm;
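
The same shm_alloc_helper() also backs the kernel-facing tee_shm_alloc_kernel_buf(). A minimal sketch of that in-kernel path, assuming ctx is a tee_context the caller already opened (for example via tee_client_open_context()); example_alloc_and_fill() and its msg/len parameters are hypothetical.

#include <linux/err.h>
#include <linux/string.h>
#include <linux/tee_drv.h>	/* tee_shm_alloc_kernel_buf(), tee_shm_get_va(), tee_shm_free() */

/*
 * Sketch: allocate a pool-backed buffer for passing data to the TEE
 * and copy a payload into it.  The caller releases the handle with
 * tee_shm_free() once the TEE no longer needs the buffer.
 */
static struct tee_shm *example_alloc_and_fill(struct tee_context *ctx,
					      const void *msg, size_t len)
{
	struct tee_shm *shm;
	void *va;

	shm = tee_shm_alloc_kernel_buf(ctx, len);	/* ends up in shm_alloc_helper() */
	if (IS_ERR(shm))
		return shm;

	va = tee_shm_get_va(shm, 0);
	if (IS_ERR(va)) {
		tee_shm_free(shm);
		return ERR_CAST(va);
	}

	memcpy(va, msg, len);
	return shm;
}
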
206 int tee_dyn_shm_alloc_helper(struct tee_shm *shm, size_t size, size_t align,
208 struct tee_shm *shm,
222 shm->kaddr = alloc_pages_exact(nr_pages * PAGE_SIZE,
224 if (!shm->kaddr)
227 shm->paddr = virt_to_phys(shm->kaddr);
228 shm->size = nr_pages * PAGE_SIZE;
237 pages[i] = virt_to_page((u8 *)shm->kaddr + i * PAGE_SIZE);
239 shm->pages = pages;
240 shm->num_pages = nr_pages;
243 rc = shm_register(shm->ctx, shm, pages, nr_pages,
244 (unsigned long)shm->kaddr);
251 free_pages_exact(shm->kaddr, shm->size);
252 shm->kaddr = NULL;
257 void tee_dyn_shm_free_helper(struct tee_shm *shm,
259 struct tee_shm *shm))
262 shm_unregister(shm->ctx, shm);
263 free_pages_exact(shm->kaddr, shm->size);
264 shm->kaddr = NULL;
265 kfree(shm->pages);
266 shm->pages = NULL;
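
tee_dyn_shm_alloc_helper() and tee_dyn_shm_free_helper() let a backend driver build its dynamic pool on top of alloc_pages_exact() plus its own register/unregister calls toward the secure world. A hedged sketch of how a driver could wire them into struct tee_shm_pool_ops, loosely modeled on what the OP-TEE driver does; my_register_with_tee(), my_unregister_with_tee() and my_make_dyn_pool() are hypothetical stand-ins for the driver's real hooks.

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/tee_core.h>	/* tee_dyn_shm_*_helper(), struct tee_shm_pool_ops */

/* Hypothetical driver hooks that tell the secure world about the pages. */
int my_register_with_tee(struct tee_context *ctx, struct tee_shm *shm,
			 struct page **pages, size_t num_pages,
			 unsigned long start);
int my_unregister_with_tee(struct tee_context *ctx, struct tee_shm *shm);

static int my_pool_op_alloc(struct tee_shm_pool *pool, struct tee_shm *shm,
			    size_t size, size_t align)
{
	/* Allocates the pages, fills shm->kaddr/paddr/pages, then registers them. */
	return tee_dyn_shm_alloc_helper(shm, size, align, my_register_with_tee);
}

static void my_pool_op_free(struct tee_shm_pool *pool, struct tee_shm *shm)
{
	/* Unregisters first, then frees the pages and the shm->pages array. */
	tee_dyn_shm_free_helper(shm, my_unregister_with_tee);
}

static void my_pool_op_destroy(struct tee_shm_pool *pool)
{
	kfree(pool);
}

static const struct tee_shm_pool_ops my_dyn_pool_ops = {
	.alloc = my_pool_op_alloc,
	.free = my_pool_op_free,
	.destroy_pool = my_pool_op_destroy,
};

static struct tee_shm_pool *my_make_dyn_pool(void)
{
	struct tee_shm_pool *pool = kzalloc(sizeof(*pool), GFP_KERNEL);

	if (!pool)
		return ERR_PTR(-ENOMEM);
	pool->ops = &my_dyn_pool_ops;
	return pool;		/* typically handed to tee_device_alloc() */
}
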
275 struct tee_shm *shm;
293 shm = kzalloc(sizeof(*shm), GFP_KERNEL);
294 if (!shm) {
299 refcount_set(&shm->refcount, 1);
300 shm->flags = flags;
301 shm->ctx = ctx;
302 shm->id = id;
311 shm->pages = kcalloc(num_pages, sizeof(*shm->pages), GFP_KERNEL);
312 if (!shm->pages) {
317 len = iov_iter_extract_pages(iter, &shm->pages, LONG_MAX, num_pages, 0,
329 shm_get_kernel_pages(shm->pages, num_pages);
331 shm->offset = off;
332 shm->size = len;
333 shm->num_pages = num_pages;
335 rc = teedev->desc->ops->shm_register(ctx, shm, shm->pages,
336 shm->num_pages, start);
342 return shm;
345 unpin_user_pages(shm->pages, shm->num_pages);
347 shm_put_kernel_pages(shm->pages, shm->num_pages);
349 kfree(shm->pages);
351 kfree(shm);
372 struct tee_shm *shm;
387 shm = register_shm_helper(ctx, &iter, flags, id);
388 if (IS_ERR(shm)) {
392 return shm;
396 ret = idr_replace(&teedev->idr, shm, id);
399 tee_shm_free(shm);
403 return shm;
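
register_shm_helper() also serves the kernel-facing tee_shm_register_kernel_buf(), which wraps an existing kernel buffer instead of allocating from the pool. A minimal sketch under that assumption; example_register_buf() is a hypothetical caller, and the buffer must outlive the returned handle.

#include <linux/err.h>
#include <linux/tee_drv.h>	/* tee_shm_register_kernel_buf() */

/*
 * Sketch: expose an existing kernel buffer to the TEE as registered
 * (TEE_SHM_DYNAMIC) shared memory.  register_shm_helper() takes
 * references on the pages and calls the backend's shm_register() op.
 * The buffer must stay allocated until tee_shm_free() is called on
 * the returned handle.
 */
static struct tee_shm *example_register_buf(struct tee_context *ctx,
					    void *buf, size_t len)
{
	struct tee_shm *shm = tee_shm_register_kernel_buf(ctx, buf, len);

	if (IS_ERR(shm))
		return shm;
	/* The handle can now be passed as a memref parameter, e.g. to
	 * tee_client_invoke_func(). */
	return shm;
}
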
439 struct tee_shm *shm = filp->private_data;
443 if (shm->flags & TEE_SHM_USER_MAPPED)
447 if (vma->vm_pgoff + vma_pages(vma) > shm->size >> PAGE_SHIFT)
450 return remap_pfn_range(vma, vma->vm_start, shm->paddr >> PAGE_SHIFT,
462 * @shm: Shared memory handle
465 int tee_shm_get_fd(struct tee_shm *shm)
469 if (shm->id < 0)
473 refcount_inc(&shm->refcount);
474 fd = anon_inode_getfd("tee_shm", &tee_shm_fops, shm, O_RDWR);
476 tee_shm_put(shm);
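
tee_shm_get_fd() is how the TEE core hands the anon-inode fd to user space (the TEE_IOC_SHM_ALLOC ioctl returns it), and tee_shm_fop_mmap() above rejects mappings that run past shm->size. A hedged user-space sketch of that flow; the /dev/tee0 node name and the 4 KiB size are assumptions.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/tee.h>

/*
 * Sketch: allocate shared memory through the TEE core and map it.
 * The ioctl return value is the fd produced by tee_shm_get_fd();
 * mmap() on that fd lands in tee_shm_fop_mmap(), which refuses
 * mappings larger than the shm.
 */
int main(void)
{
	struct tee_ioctl_shm_alloc_data data = { .size = 4096 };
	int dev = open("/dev/tee0", O_RDWR);
	if (dev < 0)
		return 1;

	int shm_fd = ioctl(dev, TEE_IOC_SHM_ALLOC, &data);
	if (shm_fd < 0)
		return 1;

	void *p = mmap(NULL, data.size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, shm_fd, 0);
	if (p == MAP_FAILED)
		return 1;

	memset(p, 0, data.size);	/* shared with the TEE, referenced by data.id */
	printf("shm id %d mapped at %p\n", data.id, p);

	munmap(p, data.size);
	close(shm_fd);
	close(dev);
	return 0;
}
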
482 * @shm: Handle to shared memory to free
484 void tee_shm_free(struct tee_shm *shm)
486 tee_shm_put(shm);
492 * @shm: Shared memory handle
497 void *tee_shm_get_va(struct tee_shm *shm, size_t offs)
499 if (!shm->kaddr)
501 if (offs >= shm->size)
503 return (char *)shm->kaddr + offs;
509 * @shm: Shared memory handle
515 int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa)
517 if (offs >= shm->size)
520 *pa = shm->paddr + offs;
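
tee_shm_get_va() and tee_shm_get_pa() both bounds-check offs against shm->size, and get_va additionally fails when the shm has no kernel mapping (shm->kaddr is NULL, as with user-registered buffers). A minimal sketch of a caller that needs both views of the same location; example_peek() and its offs layout are hypothetical.

#include <linux/err.h>
#include <linux/types.h>
#include <linux/tee_drv.h>	/* tee_shm_get_va(), tee_shm_get_pa() */

/*
 * Sketch: read a 32-bit field at 'offs' through the kernel mapping and
 * fetch the physical address of the same location to hand to the TEE.
 * Both helpers reject offs >= shm->size.
 */
static int example_peek(struct tee_shm *shm, size_t offs, u32 *val,
			phys_addr_t *pa)
{
	void *va;
	int rc;

	va = tee_shm_get_va(shm, offs);
	if (IS_ERR(va))
		return PTR_ERR(va);

	rc = tee_shm_get_pa(shm, offs, pa);
	if (rc)
		return rc;

	*val = *(u32 *)va;
	return 0;
}
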
535 struct tee_shm *shm;
542 shm = idr_find(&teedev->idr, id);
548 if (!shm || shm->ctx != ctx)
549 shm = ERR_PTR(-EINVAL);
551 refcount_inc(&shm->refcount);
553 return shm;
559 * @shm: Shared memory handle
561 void tee_shm_put(struct tee_shm *shm)
563 struct tee_device *teedev = shm->ctx->teedev;
567 if (refcount_dec_and_test(&shm->refcount)) {
574 if (shm->id >= 0)
575 idr_remove(&teedev->idr, shm->id);
581 tee_shm_release(teedev, shm);
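
tee_shm_get_from_id() takes an extra reference under the device mutex and tee_shm_put() drops it; the final put removes the id from the IDR and runs tee_shm_release(). A minimal sketch of the look-up/use/put pattern a backend driver follows when the secure world names a buffer by id; example_handle_request() and process_request() are hypothetical.

#include <linux/err.h>
#include <linux/tee_core.h>	/* tee_shm_get_from_id(), tee_shm_put(); <linux/tee_drv.h> on older kernels */
#include <linux/tee_drv.h>	/* tee_shm_get_va() */

int process_request(void *buf);	/* hypothetical consumer of the buffer */

/*
 * Sketch: resolve a shared-memory id received from the TEE, use the
 * buffer, and drop the reference.  The reference taken by
 * tee_shm_get_from_id() keeps the shm alive even if user space frees
 * its handle in the meantime; the last tee_shm_put() triggers
 * tee_shm_release().
 */
static int example_handle_request(struct tee_context *ctx, int shm_id)
{
	struct tee_shm *shm;
	void *va;
	int rc;

	shm = tee_shm_get_from_id(ctx, shm_id);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	va = tee_shm_get_va(shm, 0);	/* fails if the shm has no kernel mapping */
	if (IS_ERR(va))
		rc = PTR_ERR(va);
	else
		rc = process_request(va);

	tee_shm_put(shm);
	return rc;
}
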