// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-fence-unwrap.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/seq_file.h>
#include <linux/sync_file.h>
#include <linux/poll.h>
#include <linux/dma-resv.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/pseudo_fs.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

#include "dma-buf-sysfs-stats.h"

static inline int is_dma_buf_file(struct file *);

static DEFINE_MUTEX(dmabuf_list_mutex);
static LIST_HEAD(dmabuf_list);

static void __dma_buf_list_add(struct dma_buf *dmabuf)
{
	mutex_lock(&dmabuf_list_mutex);
	list_add(&dmabuf->list_node, &dmabuf_list);
	mutex_unlock(&dmabuf_list_mutex);
}

static void __dma_buf_list_del(struct dma_buf *dmabuf)
{
	if (!dmabuf)
		return;

	mutex_lock(&dmabuf_list_mutex);
	list_del(&dmabuf->list_node);
	mutex_unlock(&dmabuf_list_mutex);
}

/**
 * dma_buf_iter_begin - begin iteration through global list of all DMA buffers
 *
 * Returns the first buffer in the global list of DMA-bufs that's not in the
 * process of being destroyed. Increments that buffer's reference count to
 * prevent buffer destruction. Callers must release the reference, either by
 * continuing iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * First buffer from global list, with refcount elevated
 * * NULL if no active buffers are present
 */
struct dma_buf *dma_buf_iter_begin(void)
{
	struct dma_buf *ret = NULL, *dmabuf;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	list_for_each_entry(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

/**
 * dma_buf_iter_next - continue iteration through global list of all DMA buffers
 * @dmabuf:	[in]	pointer to dma_buf
 *
 * Decrements the reference count on the provided buffer. Returns the next
 * buffer from the remainder of the global list of DMA-bufs with its reference
 * count incremented. Callers must release the reference, either by continuing
 * iteration with dma_buf_iter_next(), or with dma_buf_put().
 *
 * Return:
 * * Next buffer from global list, with refcount elevated
 * * NULL if no additional active buffers are present
 */
struct dma_buf *dma_buf_iter_next(struct dma_buf *dmabuf)
{
	struct dma_buf *ret = NULL;

	/*
	 * The list mutex does not protect a dmabuf's refcount, so it can be
	 * zeroed while we are iterating. We cannot call get_dma_buf() since the
	 * caller may not already own a reference to the buffer.
	 */
	mutex_lock(&dmabuf_list_mutex);
	dma_buf_put(dmabuf);
	list_for_each_entry_continue(dmabuf, &dmabuf_list, list_node) {
		if (file_ref_get(&dmabuf->file->f_ref)) {
			ret = dmabuf;
			break;
		}
	}
	mutex_unlock(&dmabuf_list_mutex);
	return ret;
}

static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	ssize_t ret = 0;

	dmabuf = dentry->d_fsdata;
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		ret = strscpy(name, dmabuf->name, sizeof(name));
	spin_unlock(&dmabuf->name_lock);

	return dynamic_dname(buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static void dma_buf_release(struct dentry *dentry)
{
	struct dma_buf *dmabuf;

	dmabuf = dentry->d_fsdata;
	if (unlikely(!dmabuf))
		return;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * If you hit this BUG() it could mean:
	 * * There's a file reference imbalance in dma_buf_poll / dma_buf_poll_cb or somewhere else
	 * * dmabuf->cb_in/out.active are non-0 despite no pending fence callback
	 */
	BUG_ON(dmabuf->cb_in.active || dmabuf->cb_out.active);

	dma_buf_stats_teardown(dmabuf);
	dmabuf->ops->release(dmabuf);

	if (dmabuf->resv == (struct dma_resv *)&dmabuf[1])
		dma_resv_fini(dmabuf->resv);

	WARN_ON(!list_empty(&dmabuf->attachments));
	module_put(dmabuf->owner);
	kfree(dmabuf->name);
	kfree(dmabuf);
}

static int dma_buf_file_release(struct inode *inode, struct file *file)
{
	if (!is_dma_buf_file(file))
		return -EINVAL;

	__dma_buf_list_del(file->private_data);

	return 0;
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
	.d_release = dma_buf_release,
};

static struct vfsmount *dma_buf_mnt;

static int dma_buf_fs_init_context(struct fs_context *fc)
{
	struct pseudo_fs_context *ctx;

	ctx = init_pseudo(fc, DMA_BUF_MAGIC);
	if (!ctx)
		return -ENOMEM;
	ctx->dops = &dma_buf_dentry_ops;
	return 0;
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.init_fs_context = dma_buf_fs_init_context,
	.kill_sb = kill_anon_super,
};

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	 * but also allow SEEK_SET to maintain the idiomatic
	 * SEEK_END(0), SEEK_CUR(0) pattern.
	 */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

/**
 * DOC: implicit fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &dma_resv structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
 *   most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 *
 * As an alternative to poll(), the set of fences on a DMA buffer can be
 * exported as a &sync_file using &dma_buf_sync_file_export.
 */

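/*
 * A minimal userspace sketch of the above, assuming "dmabuf_fd" holds a valid
 * dma-buf file descriptor obtained from an exporting driver. Waiting for
 * EPOLLIN blocks until the most recent write/exclusive fence has signaled,
 * waiting for EPOLLOUT blocks until all attached fences have signaled:
 *
 *	struct pollfd pfd = {
 *		.fd = dmabuf_fd,
 *		.events = POLLIN,
 *	};
 *
 *	poll(&pfd, 1, -1);	// returns once pending writes have completed
 */
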
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	struct dma_buf *dmabuf = container_of(dcb->poll, struct dma_buf, poll);
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
	dma_fence_put(fence);
	/* Paired with get_file in dma_buf_poll */
	fput(dmabuf->file);
}

static bool dma_buf_poll_add_cb(struct dma_resv *resv, bool write,
				struct dma_buf_poll_cb_t *dcb)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int r;

	dma_resv_for_each_fence(&cursor, resv, dma_resv_usage_rw(write),
				fence) {
		dma_fence_get(fence);
		r = dma_fence_add_callback(fence, &dcb->cb, dma_buf_poll_cb);
		if (!r)
			return true;
		dma_fence_put(fence);
	}

	return false;
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv;
	__poll_t events;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

	dma_resv_lock(resv, NULL);

	if (events & EPOLLOUT) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_out;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLOUT) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, true, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLOUT;
		}
	}

	if (events & EPOLLIN) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_in;

		/* Check that callback isn't busy */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLIN;
		else
			dcb->active = EPOLLIN;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & EPOLLIN) {
			/* Paired with fput in dma_buf_poll_cb */
			get_file(dmabuf->file);

			if (!dma_buf_poll_add_cb(resv, false, dcb))
				/* No callback queued, wake up any other waiters */
				dma_buf_poll_cb(NULL, &dcb->cb);
			else
				events &= ~EPOLLIN;
		}
	}

	dma_resv_unlock(resv);
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * It could support changing the name of the dma-buf if the same
 * piece of memory is used for multiple purposes between different devices.
 *
 * @dmabuf: [in] dmabuf buffer that will be renamed.
 * @buf:    [in] A piece of userspace memory that contains the name of
 *               the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 *
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);

	if (IS_ERR(name))
		return PTR_ERR(name);

	spin_lock(&dmabuf->name_lock);
	kfree(dmabuf->name);
	dmabuf->name = name;
	spin_unlock(&dmabuf->name_lock);

	return 0;
}

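/*
 * Illustrative userspace use of the DMA_BUF_SET_NAME ioctl, which ends up in
 * dma_buf_set_name() above, assuming "dmabuf_fd" is a valid dma-buf file
 * descriptor; the chosen string shows up in the procfs fdinfo output and in
 * the dentry name built by dmabuffs_dname():
 *
 *	ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "camera-preview");
 */
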
#if IS_ENABLED(CONFIG_SYNC_FILE)
static long dma_buf_export_sync_file(struct dma_buf *dmabuf,
				     void __user *user_data)
{
	struct dma_buf_export_sync_file arg;
	enum dma_resv_usage usage;
	struct dma_fence *fence = NULL;
	struct sync_file *sync_file;
	int fd, ret;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	usage = dma_resv_usage_rw(arg.flags & DMA_BUF_SYNC_WRITE);
	ret = dma_resv_get_singleton(dmabuf->resv, usage, &fence);
	if (ret)
		goto err_put_fd;

	if (!fence)
		fence = dma_fence_get_stub();

	sync_file = sync_file_create(fence);

	dma_fence_put(fence);

	if (!sync_file) {
		ret = -ENOMEM;
		goto err_put_fd;
	}

	arg.fd = fd;
	if (copy_to_user(user_data, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto err_put_file;
	}

	fd_install(fd, sync_file->file);

	return 0;

err_put_file:
	fput(sync_file->file);
err_put_fd:
	put_unused_fd(fd);
	return ret;
}

static long dma_buf_import_sync_file(struct dma_buf *dmabuf,
				     const void __user *user_data)
{
	struct dma_buf_import_sync_file arg;
	struct dma_fence *fence, *f;
	enum dma_resv_usage usage;
	struct dma_fence_unwrap iter;
	unsigned int num_fences;
	int ret = 0;

	if (copy_from_user(&arg, user_data, sizeof(arg)))
		return -EFAULT;

	if (arg.flags & ~DMA_BUF_SYNC_RW)
		return -EINVAL;

	if ((arg.flags & DMA_BUF_SYNC_RW) == 0)
		return -EINVAL;

	fence = sync_file_get_fence(arg.fd);
	if (!fence)
		return -EINVAL;

	usage = (arg.flags & DMA_BUF_SYNC_WRITE) ? DMA_RESV_USAGE_WRITE :
						   DMA_RESV_USAGE_READ;

	num_fences = 0;
	dma_fence_unwrap_for_each(f, &iter, fence)
		++num_fences;

	if (num_fences > 0) {
		dma_resv_lock(dmabuf->resv, NULL);

		ret = dma_resv_reserve_fences(dmabuf->resv, num_fences);
		if (!ret) {
			dma_fence_unwrap_for_each(f, &iter, fence)
				dma_resv_add_fence(dmabuf->resv, f, usage);
		}

		dma_resv_unlock(dmabuf->resv);
	}

	dma_fence_put(fence);

	return ret;
}
#endif

static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME_A:
	case DMA_BUF_SET_NAME_B:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

#if IS_ENABLED(CONFIG_SYNC_FILE)
	case DMA_BUF_IOCTL_EXPORT_SYNC_FILE:
		return dma_buf_export_sync_file(dmabuf, (void __user *)arg);
	case DMA_BUF_IOCTL_IMPORT_SYNC_FILE:
		return dma_buf_import_sync_file(dmabuf, (const void __user *)arg);
#endif

	default:
		return -ENOTTY;
	}
}

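/*
 * A sketch of how userspace is expected to bracket CPU access around the
 * DMA_BUF_IOCTL_SYNC handling above, assuming "dmabuf_fd" is a valid dma-buf
 * file descriptor whose contents are accessed through an mmap()ed pointer:
 *
 *	struct dma_buf_sync sync = { .flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_RW };
 *
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *	// ... CPU reads and writes through the mapping ...
 *	sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_RW;
 *	ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 */
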
static void dma_buf_show_fdinfo(struct seq_file *m, struct file *file)
{
	struct dma_buf *dmabuf = file->private_data;

	seq_printf(m, "size:\t%zu\n", dmabuf->size);
	/* Don't count the temporary reference taken inside procfs seq_show */
	seq_printf(m, "count:\t%ld\n", file_count(dmabuf->file) - 1);
	seq_printf(m, "exp_name:\t%s\n", dmabuf->exp_name);
	spin_lock(&dmabuf->name_lock);
	if (dmabuf->name)
		seq_printf(m, "name:\t%s\n", dmabuf->name);
	spin_unlock(&dmabuf->name_lock);
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_file_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
	.compat_ioctl	= compat_ptr_ioctl,
	.show_fdinfo	= dma_buf_show_fdinfo,
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(size_t size, int flags)
{
	static atomic64_t dmabuf_inode = ATOMIC64_INIT(0);
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);
	struct file *file;

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = size;
	inode_set_bytes(inode, size);

	/*
	 * The ->i_ino acquired from get_next_ino() is not unique and thus not
	 * suitable for use as the dentry name by dmabuf stats. Override
	 * ->i_ino with a unique, dmabuffs-specific value.
	 */
	inode->i_ino = atomic64_inc_return(&dmabuf_inode);
	flags &= O_ACCMODE | O_NONBLOCK;
	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

634f728a5eaSChristian König /**
635f728a5eaSChristian König * DOC: dma buf device access
636f728a5eaSChristian König *
637f728a5eaSChristian König * For device DMA access to a shared DMA buffer the usual sequence of operations
638f728a5eaSChristian König * is fairly simple:
639f728a5eaSChristian König *
640f728a5eaSChristian König * 1. The exporter defines his exporter instance using
641f728a5eaSChristian König * DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
642f728a5eaSChristian König * buffer object into a &dma_buf. It then exports that &dma_buf to userspace
6433aac4502SMaarten Lankhorst * as a file descriptor by calling dma_buf_fd().
6449abdffe2SSumit Semwal *
645a026df4cSChris Wilson * 2. Userspace passes this file-descriptors to all drivers it wants this buffer
646f728a5eaSChristian König * to share with: First the file descriptor is converted to a &dma_buf using
6479abdffe2SSumit Semwal * dma_buf_get(). Then the buffer is attached to the device using
648d15bd7eeSSumit Semwal * dma_buf_attach().
649d8fbe341SSumit Semwal *
650d8fbe341SSumit Semwal * Up to this stage the exporter is still free to migrate or reallocate the
651d8fbe341SSumit Semwal * backing storage.
652d8fbe341SSumit Semwal *
6539abdffe2SSumit Semwal * 3. Once the buffer is attached to all devices userspace can initiate DMA
6546348dd29SCharan Teja Kalla * access to the shared buffer. In the kernel this is done by calling
6559b495a58SMaarten Lankhorst * dma_buf_map_attachment() and dma_buf_unmap_attachment().
6566b51b02aSChristian König *
6576b51b02aSChristian König * 4. Once a driver is done with a shared buffer it needs to call
658d15bd7eeSSumit Semwal * dma_buf_detach() (after cleaning up any mappings) and then release the
659d1aa06a1SLaurent Pinchart * reference acquired with dma_buf_get() by calling dma_buf_put().
660d15bd7eeSSumit Semwal *
661f728a5eaSChristian König * For the detailed semantics exporters are expected to implement see
662f728a5eaSChristian König * &dma_buf_ops.
663f728a5eaSChristian König */
664d15bd7eeSSumit Semwal
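/*
 * A minimal importer-side sketch of the sequence above, assuming "dev" is the
 * importing device and "fd" is the file descriptor received from userspace;
 * error handling is omitted for brevity:
 *
 *	struct dma_buf *dmabuf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach = dma_buf_attach(dmabuf, dev);
 *	struct sg_table *sgt;
 *
 *	sgt = dma_buf_map_attachment_unlocked(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device with the addresses in sgt ...
 *
 *	dma_buf_unmap_attachment_unlocked(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_detach(dmabuf, attach);
 *	dma_buf_put(dmabuf);
 */
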
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created struct dma_buf object, which wraps the
 * supplied private data and operations for struct dma_buf_ops. On missing ops
 * or an error allocating the struct dma_buf, returns a negative error code
 * wrapped into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro. A minimal exporter-side sketch follows
 * this function.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct dma_resv *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (WARN_ON(!exp_info->priv || !exp_info->ops
		    || !exp_info->ops->map_dma_buf
		    || !exp_info->ops->unmap_dma_buf
		    || !exp_info->ops->release))
		return ERR_PTR(-EINVAL);

	if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
		return ERR_PTR(-EINVAL);

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	file = dma_buf_getfile(exp_info->size, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_module;
	}

	if (!exp_info->resv)
		alloc_size += sizeof(struct dma_resv);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;
	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_file;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	spin_lock_init(&dmabuf->name_lock);
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_in.poll = dmabuf->cb_out.poll = &dmabuf->poll;
	dmabuf->cb_in.active = dmabuf->cb_out.active = 0;
	INIT_LIST_HEAD(&dmabuf->attachments);

	if (!resv) {
		dmabuf->resv = (struct dma_resv *)&dmabuf[1];
		dma_resv_init(dmabuf->resv);
	} else {
		dmabuf->resv = resv;
	}

	ret = dma_buf_stats_setup(dmabuf, file);
	if (ret)
		goto err_dmabuf;

	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;
	dmabuf->file = file;

	__dma_buf_list_add(dmabuf);

	return dmabuf;

err_dmabuf:
	if (!resv)
		dma_resv_fini(dmabuf->resv);
	kfree(dmabuf);
err_file:
	fput(file);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_export, "DMA_BUF");

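/*
 * A minimal exporter-side sketch of dma_buf_export() and dma_buf_fd(),
 * assuming a hypothetical "my_dmabuf_ops" that implements at least
 * map_dma_buf, unmap_dma_buf and release, and a driver-private object "obj"
 * carrying the buffer size; error handling is omitted for brevity:
 *
 *	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *	struct dma_buf *dmabuf;
 *	int fd;
 *
 *	exp_info.ops = &my_dmabuf_ops;
 *	exp_info.size = obj->size;
 *	exp_info.flags = O_RDWR | O_CLOEXEC;
 *	exp_info.priv = obj;
 *
 *	dmabuf = dma_buf_export(&exp_info);
 *	fd = dma_buf_fd(dmabuf, O_CLOEXEC);	// handed back to userspace
 */
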
/**
 * dma_buf_fd - returns a file descriptor for the given struct dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_fd, "DMA_BUF");

/**
 * dma_buf_get - returns the struct dma_buf related to an fd
 * @fd:	[in]	fd associated with the struct dma_buf to be returned
 *
 * On success, returns the struct dma_buf associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_NS_GPL(dma_buf_get, "DMA_BUF");

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_NS_GPL(dma_buf_put, "DMA_BUF");

static void mangle_sg_table(struct sg_table *sg_table)
{
#ifdef CONFIG_DMABUF_DEBUG
	int i;
	struct scatterlist *sg;

	/* To catch abuse of the underlying struct page by importers mix
	 * up the bits, but take care to preserve the low SG_ bits to
	 * not corrupt the sgt. The mixing is undone on unmap
	 * before passing the sgt back to the exporter.
	 */
	for_each_sgtable_sg(sg_table, sg, i)
		sg->page_link ^= ~0xffUL;
#endif

}

static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
	return !!attach->importer_ops;
}

static bool
dma_buf_pin_on_map(struct dma_buf_attachment *attach)
{
	return attach->dmabuf->ops->pin &&
		(!dma_buf_attachment_is_dynamic(attach) ||
		 !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY));
}

/**
 * DOC: locking convention
 *
 * In order to avoid deadlock situations between dma-buf exporters and
 * importers, all dma-buf API users must follow the common dma-buf locking
 * convention. A short importer-side example of the convention follows this
 * comment.
 *
 * Convention for importers
 *
 * 1. Importers must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_pin()
 * - dma_buf_unpin()
 * - dma_buf_map_attachment()
 * - dma_buf_unmap_attachment()
 * - dma_buf_vmap()
 * - dma_buf_vunmap()
 *
 * 2. Importers must not hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_attach()
 * - dma_buf_dynamic_attach()
 * - dma_buf_detach()
 * - dma_buf_export()
 * - dma_buf_fd()
 * - dma_buf_get()
 * - dma_buf_put()
 * - dma_buf_mmap()
 * - dma_buf_begin_cpu_access()
 * - dma_buf_end_cpu_access()
 * - dma_buf_map_attachment_unlocked()
 * - dma_buf_unmap_attachment_unlocked()
 * - dma_buf_vmap_unlocked()
 * - dma_buf_vunmap_unlocked()
 *
 * Convention for exporters
 *
 * 1. These &dma_buf_ops callbacks are invoked with unlocked dma-buf
 *    reservation and exporter can take the lock:
 *
 * - &dma_buf_ops.attach()
 * - &dma_buf_ops.detach()
 * - &dma_buf_ops.release()
 * - &dma_buf_ops.begin_cpu_access()
 * - &dma_buf_ops.end_cpu_access()
 * - &dma_buf_ops.mmap()
 *
 * 2. These &dma_buf_ops callbacks are invoked with locked dma-buf
 *    reservation and exporter can't take the lock:
 *
 * - &dma_buf_ops.pin()
 * - &dma_buf_ops.unpin()
 * - &dma_buf_ops.map_dma_buf()
 * - &dma_buf_ops.unmap_dma_buf()
 * - &dma_buf_ops.vmap()
 * - &dma_buf_ops.vunmap()
 *
 * 3. Exporters must hold the dma-buf reservation lock when calling these
 *    functions:
 *
 * - dma_buf_move_notify()
 */

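/*
 * A short importer-side sketch of the convention above, assuming a valid
 * "attach" obtained from dma_buf_dynamic_attach(): the locked map/unmap
 * variants are called with the reservation lock held, while the *_unlocked()
 * helpers take and drop it internally:
 *
 *	dma_resv_lock(dmabuf->resv, NULL);
 *	sgt = dma_buf_map_attachment(attach, DMA_TO_DEVICE);
 *	// ... use the mapping ...
 *	dma_buf_unmap_attachment(attach, sgt, DMA_TO_DEVICE);
 *	dma_resv_unlock(dmabuf->resv);
 */
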
92615fd552dSChristian König /**
92715fd552dSChristian König * dma_buf_dynamic_attach - Add the device to dma_buf's attachments list
92815fd552dSChristian König * @dmabuf: [in] buffer to attach device to.
92915fd552dSChristian König * @dev: [in] device to be attached.
93015fd552dSChristian König * @importer_ops: [in] importer operations for the attachment
931809d9c72SDmitry Osipenko * @importer_priv: [in] importer private pointer for the attachment
9327e008b02SChristian König *
933bb42df46SChristian König * Returns struct dma_buf_attachment pointer for this attachment. Attachments
934bb42df46SChristian König * must be cleaned up by calling dma_buf_detach().
935bb42df46SChristian König *
93615fd552dSChristian König * Optionally this calls &dma_buf_ops.attach to allow device-specific attach
93784335675SDaniel Vetter * functionality.
93815fd552dSChristian König *
93915fd552dSChristian König * Returns:
94015fd552dSChristian König *
94115fd552dSChristian König * A pointer to newly created &dma_buf_attachment on success, or a negative
942bb42df46SChristian König * error code wrapped into a pointer on failure.
94315fd552dSChristian König *
94415fd552dSChristian König * Note that this can fail if the backing storage of @dmabuf is in a place not
94515fd552dSChristian König * accessible to @dev, and cannot be moved to a more suitable place. This is
94615fd552dSChristian König * indicated with the error code -EBUSY.
94715fd552dSChristian König */
94815fd552dSChristian König struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf * dmabuf,struct device * dev,const struct dma_buf_attach_ops * importer_ops,void * importer_priv)949d15bd7eeSSumit Semwal dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
950d15bd7eeSSumit Semwal const struct dma_buf_attach_ops *importer_ops,
951d15bd7eeSSumit Semwal void *importer_priv)
952d15bd7eeSSumit Semwal {
953d15bd7eeSSumit Semwal struct dma_buf_attachment *attach;
95415fd552dSChristian König int ret;
955bb42df46SChristian König
956bb42df46SChristian König if (WARN_ON(!dmabuf || !dev))
9577e008b02SChristian König return ERR_PTR(-EINVAL);
958bb42df46SChristian König
95915fd552dSChristian König if (WARN_ON(importer_ops && !importer_ops->move_notify))
96015fd552dSChristian König return ERR_PTR(-EINVAL);
96115fd552dSChristian König
96215fd552dSChristian König attach = kzalloc(sizeof(*attach), GFP_KERNEL);
96315fd552dSChristian König if (!attach)
96415fd552dSChristian König return ERR_PTR(-ENOMEM);
96516b0314aSGreg Kroah-Hartman
96615fd552dSChristian König attach->dev = dev;
96715fd552dSChristian König attach->dmabuf = dmabuf;
96815fd552dSChristian König if (importer_ops)
96915fd552dSChristian König attach->peer2peer = importer_ops->allow_peer2peer;
97015fd552dSChristian König attach->importer_ops = importer_ops;
97115fd552dSChristian König attach->importer_priv = importer_priv;
97215fd552dSChristian König
97315fd552dSChristian König if (dmabuf->ops->attach) {
97415fd552dSChristian König ret = dmabuf->ops->attach(dmabuf, attach);
97515fd552dSChristian König if (ret)
97615fd552dSChristian König goto err_attach;
97715fd552dSChristian König }
978bb42df46SChristian König dma_resv_lock(dmabuf->resv, NULL);
979d15bd7eeSSumit Semwal list_add(&attach->node, &dmabuf->attachments);
98016b0314aSGreg Kroah-Hartman dma_resv_unlock(dmabuf->resv);
981d15bd7eeSSumit Semwal
98284335675SDaniel Vetter return attach;
98384335675SDaniel Vetter
98484335675SDaniel Vetter err_attach:
98584335675SDaniel Vetter kfree(attach);
98684335675SDaniel Vetter return ERR_PTR(ret);
98784335675SDaniel Vetter }
98884335675SDaniel Vetter EXPORT_SYMBOL_NS_GPL(dma_buf_dynamic_attach, "DMA_BUF");
98984335675SDaniel Vetter
99084335675SDaniel Vetter /**
99184335675SDaniel Vetter * dma_buf_attach - Wrapper for dma_buf_dynamic_attach
992d15bd7eeSSumit Semwal * @dmabuf: [in] buffer to attach device to.
99385804b70SDaniel Vetter * @dev: [in] device to be attached.
994d15bd7eeSSumit Semwal *
995d15bd7eeSSumit Semwal * Wrapper to call dma_buf_dynamic_attach() for drivers which still use a static
996d15bd7eeSSumit Semwal * mapping.
9972904a8c1SDaniel Vetter */
dma_buf_attach(struct dma_buf * dmabuf,struct device * dev)99885804b70SDaniel Vetter struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
99985804b70SDaniel Vetter struct device *dev)
1000d15bd7eeSSumit Semwal {
1001d15bd7eeSSumit Semwal return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
1002d15bd7eeSSumit Semwal }
1003d3292daeSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_attach, "DMA_BUF");
1004d15bd7eeSSumit Semwal
1005d15bd7eeSSumit Semwal /**
1006d3292daeSDmitry Osipenko * dma_buf_detach - Remove the given attachment from dmabuf's attachments list
100715fd552dSChristian König * @dmabuf: [in] buffer to detach from.
1008d15bd7eeSSumit Semwal * @attach: [in] attachment to be detached; is free'd after this call.
1009f13e143eSChristian König *
101084335675SDaniel Vetter * Clean up a device attachment obtained by calling dma_buf_attach().
1011f13e143eSChristian König *
1012809d9c72SDmitry Osipenko * Optionally this calls &dma_buf_ops.detach for device-specific detach.
10137e008b02SChristian König */
dma_buf_detach(struct dma_buf * dmabuf,struct dma_buf_attachment * attach)101415fd552dSChristian König void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
1015d15bd7eeSSumit Semwal {
1016809d9c72SDmitry Osipenko if (WARN_ON(!dmabuf || !attach || dmabuf != attach->dmabuf))
101715fd552dSChristian König return;
1018809d9c72SDmitry Osipenko
1019d15bd7eeSSumit Semwal dma_resv_lock(dmabuf->resv, NULL);
1020d15bd7eeSSumit Semwal list_del(&attach->node);
1021d15bd7eeSSumit Semwal dma_resv_unlock(dmabuf->resv);
1022d15bd7eeSSumit Semwal
1023d15bd7eeSSumit Semwal if (dmabuf->ops->detach)
102416b0314aSGreg Kroah-Hartman dmabuf->ops->detach(dmabuf, attach);
1025d15bd7eeSSumit Semwal
1026d15bd7eeSSumit Semwal kfree(attach);
1027bb42df46SChristian König }
1028bb42df46SChristian König EXPORT_SYMBOL_NS_GPL(dma_buf_detach, "DMA_BUF");
1029bb42df46SChristian König
1030c545781eSDaniel Vetter /**
1031c545781eSDaniel Vetter * dma_buf_pin - Lock down the DMA-buf
1032c545781eSDaniel Vetter * @attach: [in] attachment which should be pinned
1033c545781eSDaniel Vetter *
1034c545781eSDaniel Vetter * Only dynamic importers (who set up @attach with dma_buf_dynamic_attach()) may
1035c545781eSDaniel Vetter * call this, and only for limited use cases like scanout and not for temporary
1036c545781eSDaniel Vetter * pin operations. It is not permitted to allow userspace to pin arbitrary
1037bb42df46SChristian König * amounts of buffers through this interface.
1038bb42df46SChristian König *
1039bb42df46SChristian König * Buffers must be unpinned by calling dma_buf_unpin().
1040bb42df46SChristian König *
1041bb42df46SChristian König * Returns:
1042bb42df46SChristian König * 0 on success, negative error code on failure.
1043bb42df46SChristian König */
dma_buf_pin(struct dma_buf_attachment * attach)1044bb42df46SChristian König int dma_buf_pin(struct dma_buf_attachment *attach)
1045c545781eSDaniel Vetter {
1046c545781eSDaniel Vetter struct dma_buf *dmabuf = attach->dmabuf;
1047bb42df46SChristian König int ret = 0;
1048bb42df46SChristian König
1049bb42df46SChristian König WARN_ON(!attach->importer_ops);
1050bb42df46SChristian König
1051bb42df46SChristian König dma_resv_assert_held(dmabuf->resv);
1052bb42df46SChristian König
1053bb42df46SChristian König if (dmabuf->ops->pin)
105416b0314aSGreg Kroah-Hartman ret = dmabuf->ops->pin(attach);
1055bb42df46SChristian König
1056bb42df46SChristian König return ret;
1057c545781eSDaniel Vetter }
1058bb42df46SChristian König EXPORT_SYMBOL_NS_GPL(dma_buf_pin, "DMA_BUF");
1059c545781eSDaniel Vetter
1060c545781eSDaniel Vetter /**
1061c545781eSDaniel Vetter * dma_buf_unpin - Unpin a DMA-buf
1062c545781eSDaniel Vetter * @attach: [in] attachment which should be unpinned
1063bb42df46SChristian König *
1064bb42df46SChristian König * This unpins a buffer pinned by dma_buf_pin() and allows the exporter to move
1065bb42df46SChristian König * any mapping of @attach again and inform the importer through
1066bb42df46SChristian König * &dma_buf_attach_ops.move_notify.
1067bb42df46SChristian König */
dma_buf_unpin(struct dma_buf_attachment * attach)1068c545781eSDaniel Vetter void dma_buf_unpin(struct dma_buf_attachment *attach)
1069c545781eSDaniel Vetter {
1070bb42df46SChristian König struct dma_buf *dmabuf = attach->dmabuf;
1071bb42df46SChristian König
1072bb42df46SChristian König WARN_ON(!attach->importer_ops);
1073bb42df46SChristian König
1074bb42df46SChristian König dma_resv_assert_held(dmabuf->resv);
107516b0314aSGreg Kroah-Hartman
1076bb42df46SChristian König if (dmabuf->ops->unpin)
1077bb42df46SChristian König dmabuf->ops->unpin(attach);
1078d15bd7eeSSumit Semwal }
1079d15bd7eeSSumit Semwal EXPORT_SYMBOL_NS_GPL(dma_buf_unpin, "DMA_BUF");
1080d15bd7eeSSumit Semwal
1081d15bd7eeSSumit Semwal /**
1082d15bd7eeSSumit Semwal * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
1083d15bd7eeSSumit Semwal * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
1084fee0c54eSColin Cross * dma_buf_ops.
10852904a8c1SDaniel Vetter * @attach: [in] attachment whose scatterlist is to be returned
10862904a8c1SDaniel Vetter * @direction: [in] direction of DMA transfer
1087ac80cd17SJianxin Xiong *
1088ac80cd17SJianxin Xiong * Returns an sg_table containing the scatterlist of the mapping, or an ERR_PTR
1089ac80cd17SJianxin Xiong * on error. May return ERR_PTR(-EINTR) if it is interrupted by a signal.
1090c138782dSLiviu Dudau *
10912904a8c1SDaniel Vetter * On success, the DMA addresses and lengths in the returned scatterlist are
10922904a8c1SDaniel Vetter * PAGE_SIZE aligned.
10932904a8c1SDaniel Vetter *
109489bcadc8SDaniel Vetter * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
109589bcadc8SDaniel Vetter * the underlying backing storage is pinned for as long as a mapping exists,
109689bcadc8SDaniel Vetter * therefore users/importers should not hold onto a mapping for longer than
1097d15bd7eeSSumit Semwal * necessary.
1098d15bd7eeSSumit Semwal *
1099d15bd7eeSSumit Semwal * Important: Dynamic importers must wait for the exclusive fence of the struct
1100d15bd7eeSSumit Semwal * dma_resv attached to the DMA-BUF first.
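 *
 * A sketch of that wait for a dynamic importer (illustrative only; it
 * mirrors the DMA_RESV_USAGE_KERNEL wait this function performs for static
 * attachments, and error unwinding is omitted):
 *
 * .. code-block:: c
 *
 *     // caller already holds the reservation lock, as required here
 *     sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
 *     if (IS_ERR(sgt))
 *             return PTR_ERR(sgt);
 *
 *     ret = dma_resv_wait_timeout(attach->dmabuf->resv,
 *                                 DMA_RESV_USAGE_KERNEL, true,
 *                                 MAX_SCHEDULE_TIMEOUT);
 *     if (ret < 0)
 *             return ret;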
1101531beb06SColin Ian King */
dma_buf_map_attachment(struct dma_buf_attachment * attach,enum dma_data_direction direction)1102bb42df46SChristian König struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
1103d15bd7eeSSumit Semwal enum dma_data_direction direction)
1104d15bd7eeSSumit Semwal {
1105d15bd7eeSSumit Semwal struct sg_table *sg_table;
1106d1aa06a1SLaurent Pinchart signed long ret;
1107d15bd7eeSSumit Semwal
1108d15bd7eeSSumit Semwal might_sleep();
110915fd552dSChristian König
111015fd552dSChristian König if (WARN_ON(!attach || !attach->dmabuf))
1111f13e143eSChristian König return ERR_PTR(-EINVAL);
1112f13e143eSChristian König
1113f13e143eSChristian König dma_resv_assert_held(attach->dmabuf->resv);
1114f13e143eSChristian König
1115f13e143eSChristian König if (dma_buf_pin_on_map(attach)) {
1116f13e143eSChristian König ret = attach->dmabuf->ops->pin(attach);
1117f13e143eSChristian König /*
1118f13e143eSChristian König * Catch exporters making buffers inaccessible even when
1119f13e143eSChristian König * attachments preventing that exist.
1120f13e143eSChristian König */
1121f13e143eSChristian König WARN_ON_ONCE(ret == -EBUSY);
1122f13e143eSChristian König if (ret)
1123bb42df46SChristian König return ERR_PTR(ret);
11244981cdb0SChristian König }
11257e008b02SChristian König
1126bb42df46SChristian König sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
1127bb42df46SChristian König if (!sg_table)
1128bb42df46SChristian König sg_table = ERR_PTR(-ENOMEM);
1129bb42df46SChristian König if (IS_ERR(sg_table))
113015fd552dSChristian König goto error_unpin;
113184335675SDaniel Vetter
1132fee0c54eSColin Cross /*
1133fee0c54eSColin Cross * Importers with static attachments don't wait for fences themselves, so do it here.
1134d15bd7eeSSumit Semwal */
1135bb42df46SChristian König if (!dma_buf_attachment_is_dynamic(attach)) {
11364981cdb0SChristian König ret = dma_resv_wait_timeout(attach->dmabuf->resv,
11377e008b02SChristian König DMA_RESV_USAGE_KERNEL, true,
1138bb42df46SChristian König MAX_SCHEDULE_TIMEOUT);
1139f13e143eSChristian König if (ret < 0)
1140f13e143eSChristian König goto error_unmap;
1141f13e143eSChristian König }
1142f13e143eSChristian König mangle_sg_table(sg_table);
1143f13e143eSChristian König
1144ac80cd17SJianxin Xiong #ifdef CONFIG_DMA_API_DEBUG
114500efd65aSJianxin Xiong {
1146ac80cd17SJianxin Xiong struct scatterlist *sg;
1147ac80cd17SJianxin Xiong u64 addr;
1148ac80cd17SJianxin Xiong int len;
1149ac80cd17SJianxin Xiong int i;
1150ac80cd17SJianxin Xiong
1151ac80cd17SJianxin Xiong for_each_sgtable_dma_sg(sg_table, sg, i) {
1152ac80cd17SJianxin Xiong addr = sg_dma_address(sg);
1153ac80cd17SJianxin Xiong len = sg_dma_len(sg);
1154ac80cd17SJianxin Xiong if (!PAGE_ALIGNED(addr) || !PAGE_ALIGNED(len)) {
1155ac80cd17SJianxin Xiong pr_debug("%s: addr %llx or len %x is not page aligned!\n",
1156ac80cd17SJianxin Xiong __func__, addr, len);
1157ac80cd17SJianxin Xiong }
1158ac80cd17SJianxin Xiong }
1159ac80cd17SJianxin Xiong }
1160ac80cd17SJianxin Xiong #endif /* CONFIG_DMA_API_DEBUG */
1161d15bd7eeSSumit Semwal return sg_table;
1162d15bd7eeSSumit Semwal
116316b0314aSGreg Kroah-Hartman error_unmap:
1164d15bd7eeSSumit Semwal attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
1165d15bd7eeSSumit Semwal sg_table = ERR_PTR(ret);
116619d6634dSDmitry Osipenko
116719d6634dSDmitry Osipenko error_unpin:
116819d6634dSDmitry Osipenko if (dma_buf_pin_on_map(attach))
116919d6634dSDmitry Osipenko attach->dmabuf->ops->unpin(attach);
117019d6634dSDmitry Osipenko
117119d6634dSDmitry Osipenko return sg_table;
117219d6634dSDmitry Osipenko }
117319d6634dSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment, "DMA_BUF");
117419d6634dSDmitry Osipenko
117519d6634dSDmitry Osipenko /**
117619d6634dSDmitry Osipenko * dma_buf_map_attachment_unlocked - Returns the scatterlist table of the attachment;
117719d6634dSDmitry Osipenko * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
117819d6634dSDmitry Osipenko * dma_buf_ops.
117919d6634dSDmitry Osipenko * @attach: [in] attachment whose scatterlist is to be returned
118019d6634dSDmitry Osipenko * @direction: [in] direction of DMA transfer
118119d6634dSDmitry Osipenko *
118219d6634dSDmitry Osipenko * Unlocked variant of dma_buf_map_attachment().
118319d6634dSDmitry Osipenko */
118419d6634dSDmitry Osipenko struct sg_table *
dma_buf_map_attachment_unlocked(struct dma_buf_attachment * attach,enum dma_data_direction direction)118519d6634dSDmitry Osipenko dma_buf_map_attachment_unlocked(struct dma_buf_attachment *attach,
118619d6634dSDmitry Osipenko enum dma_data_direction direction)
118719d6634dSDmitry Osipenko {
118819d6634dSDmitry Osipenko struct sg_table *sg_table;
118919d6634dSDmitry Osipenko
119019d6634dSDmitry Osipenko might_sleep();
119119d6634dSDmitry Osipenko
119219d6634dSDmitry Osipenko if (WARN_ON(!attach || !attach->dmabuf))
119319d6634dSDmitry Osipenko return ERR_PTR(-EINVAL);
1194d15bd7eeSSumit Semwal
1195d15bd7eeSSumit Semwal dma_resv_lock(attach->dmabuf->resv, NULL);
1196d15bd7eeSSumit Semwal sg_table = dma_buf_map_attachment(attach, direction);
1197d15bd7eeSSumit Semwal dma_resv_unlock(attach->dmabuf->resv);
1198d15bd7eeSSumit Semwal
119933ea2dcbSSumit Semwal return sg_table;
1200d15bd7eeSSumit Semwal }
12012904a8c1SDaniel Vetter EXPORT_SYMBOL_NS_GPL(dma_buf_map_attachment_unlocked, "DMA_BUF");
1202d15bd7eeSSumit Semwal
1203d15bd7eeSSumit Semwal /**
120433ea2dcbSSumit Semwal * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
120533ea2dcbSSumit Semwal * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
1206d15bd7eeSSumit Semwal * dma_buf_ops.
1207b6fa0cd6SRob Clark * @attach: [in] attachment to unmap buffer from
1208b6fa0cd6SRob Clark * @sg_table: [in] scatterlist info of the buffer to unmap
1209d1aa06a1SLaurent Pinchart * @direction: [in] direction of DMA transfer
1210d15bd7eeSSumit Semwal *
1211d15bd7eeSSumit Semwal * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
121215fd552dSChristian König */
dma_buf_unmap_attachment(struct dma_buf_attachment * attach,struct sg_table * sg_table,enum dma_data_direction direction)121315fd552dSChristian König void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
1214f13e143eSChristian König struct sg_table *sg_table,
1215f13e143eSChristian König enum dma_data_direction direction)
1216f13e143eSChristian König {
121784335675SDaniel Vetter might_sleep();
1218bb42df46SChristian König
1219bb42df46SChristian König if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
12204981cdb0SChristian König return;
1221bb42df46SChristian König
1222d15bd7eeSSumit Semwal dma_resv_assert_held(attach->dmabuf->resv);
122316b0314aSGreg Kroah-Hartman
1224fc13020eSDaniel Vetter mangle_sg_table(sg_table);
12250959a168SDaniel Vetter attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
122619d6634dSDmitry Osipenko
122719d6634dSDmitry Osipenko if (dma_buf_pin_on_map(attach))
122819d6634dSDmitry Osipenko attach->dmabuf->ops->unpin(attach);
122919d6634dSDmitry Osipenko }
123019d6634dSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment, "DMA_BUF");
123119d6634dSDmitry Osipenko
123219d6634dSDmitry Osipenko /**
123319d6634dSDmitry Osipenko * dma_buf_unmap_attachment_unlocked - unmaps and decreases usecount of the buffer; might
123419d6634dSDmitry Osipenko * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
123519d6634dSDmitry Osipenko * dma_buf_ops.
123619d6634dSDmitry Osipenko * @attach: [in] attachment to unmap buffer from
123719d6634dSDmitry Osipenko * @sg_table: [in] scatterlist info of the buffer to unmap
123819d6634dSDmitry Osipenko * @direction: [in] direction of DMA transfer
123919d6634dSDmitry Osipenko *
124019d6634dSDmitry Osipenko * Unlocked variant of dma_buf_unmap_attachment().
124119d6634dSDmitry Osipenko */
dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment * attach,struct sg_table * sg_table,enum dma_data_direction direction)124219d6634dSDmitry Osipenko void dma_buf_unmap_attachment_unlocked(struct dma_buf_attachment *attach,
124319d6634dSDmitry Osipenko struct sg_table *sg_table,
124419d6634dSDmitry Osipenko enum dma_data_direction direction)
124519d6634dSDmitry Osipenko {
124619d6634dSDmitry Osipenko might_sleep();
124719d6634dSDmitry Osipenko
124819d6634dSDmitry Osipenko if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
124919d6634dSDmitry Osipenko return;
125019d6634dSDmitry Osipenko
1251bb42df46SChristian König dma_resv_lock(attach->dmabuf->resv, NULL);
1252bb42df46SChristian König dma_buf_unmap_attachment(attach, sg_table, direction);
1253bb42df46SChristian König dma_resv_unlock(attach->dmabuf->resv);
1254bb42df46SChristian König }
1255b56ffa58ST.J. Mercier EXPORT_SYMBOL_NS_GPL(dma_buf_unmap_attachment_unlocked, "DMA_BUF");
1256bb42df46SChristian König
1257bb42df46SChristian König /**
1258bb42df46SChristian König * dma_buf_move_notify - notify attachments that DMA-buf is moving
1259bb42df46SChristian König *
1260bb42df46SChristian König * @dmabuf: [in] buffer which is moving
1261bb42df46SChristian König *
1262bb42df46SChristian König * Informs all attachments that they need to destroy and recreate all their
1263bb42df46SChristian König * mappings.
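 *
 * A minimal exporter-side sketch (illustrative only; the eviction condition
 * is a placeholder):
 *
 * .. code-block:: c
 *
 *     dma_resv_lock(dmabuf->resv, NULL);
 *     if (must_evict_buffer)
 *             dma_buf_move_notify(dmabuf);
 *     dma_resv_unlock(dmabuf->resv);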
1264bb42df46SChristian König */
dma_buf_move_notify(struct dma_buf * dmabuf)12654981cdb0SChristian König void dma_buf_move_notify(struct dma_buf *dmabuf)
1266bb42df46SChristian König {
1267bb42df46SChristian König struct dma_buf_attachment *attach;
126816b0314aSGreg Kroah-Hartman
1269bb42df46SChristian König dma_resv_assert_held(dmabuf->resv);
1270bb42df46SChristian König
12710959a168SDaniel Vetter list_for_each_entry(attach, &dmabuf->attachments, node)
12720959a168SDaniel Vetter if (attach->importer_ops)
1273b56ffa58ST.J. Mercier attach->importer_ops->move_notify(attach);
12740959a168SDaniel Vetter }
12750959a168SDaniel Vetter EXPORT_SYMBOL_NS_GPL(dma_buf_move_notify, "DMA_BUF");
12760959a168SDaniel Vetter
1277b56ffa58ST.J. Mercier /**
12780959a168SDaniel Vetter * DOC: cpu access
12790959a168SDaniel Vetter *
12800959a168SDaniel Vetter * There are multiple reasons for supporting CPU access to a dma buffer object:
12817f0de8d8SDaniel Vetter *
12827f0de8d8SDaniel Vetter * - Fallback operations in the kernel, for example when a device is connected
12837f0de8d8SDaniel Vetter * over USB and the kernel needs to shuffle the data around first before
12840959a168SDaniel Vetter * sending it away. Cache coherency is handled by bracketing any transactions
12850959a168SDaniel Vetter * with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
12877938f421SLucas De Marchi *
12887938f421SLucas De Marchi * Since most kernel-internal dma-buf accesses need the entire buffer, a
12890959a168SDaniel Vetter * vmap interface is introduced. Note that on very old 32-bit architectures
12900959a168SDaniel Vetter * vmalloc space might be limited and result in vmap calls failing.
1291de9114ecSDaniel Vetter *
1292de9114ecSDaniel Vetter * Interfaces:
1293de9114ecSDaniel Vetter *
1294de9114ecSDaniel Vetter * .. code-block:: c
12950959a168SDaniel Vetter *
12960959a168SDaniel Vetter * int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
12970959a168SDaniel Vetter * void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
12980959a168SDaniel Vetter *
12990959a168SDaniel Vetter * The vmap call can fail if there is no vmap support in the exporter, or if
13000959a168SDaniel Vetter * it runs out of vmalloc space. Note that the dma-buf layer keeps a reference
13010959a168SDaniel Vetter * count for all vmap access and calls down into the exporter's vmap function
13020959a168SDaniel Vetter * only when no vmapping exists, and only unmaps it once. Protection against
13030959a168SDaniel Vetter * concurrent vmap/vunmap calls is provided by taking the reservation lock in &dma_buf.resv.
1304b56ffa58ST.J. Mercier *
13050959a168SDaniel Vetter * - For full compatibility on the importer side with existing userspace
13060959a168SDaniel Vetter * interfaces, which might already support mmap'ing buffers. This is needed in
13070959a168SDaniel Vetter * many processing pipelines (e.g. feeding a software rendered image into a
13080959a168SDaniel Vetter * hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
13090959a168SDaniel Vetter * framework already supported this, and mmap support was needed for DMA buffer
13100959a168SDaniel Vetter * file descriptors to replace ION buffers.
13110959a168SDaniel Vetter *
13120959a168SDaniel Vetter * There are no special interfaces, userspace simply calls mmap on the dma-buf
13130959a168SDaniel Vetter * fd. But like for CPU access there's a need to bracket the actual access,
13140959a168SDaniel Vetter * which is handled by the ioctl (DMA_BUF_IOCTL_SYNC); a minimal userspace
13150959a168SDaniel Vetter * sketch is shown at the end of this section. Note that DMA_BUF_IOCTL_SYNC
13160959a168SDaniel Vetter * can fail with -EAGAIN or -EINTR, in which case it must be restarted.
13170959a168SDaniel Vetter *
13180959a168SDaniel Vetter * Some systems might need some sort of cache coherency management e.g. when
13190959a168SDaniel Vetter * CPU and GPU domains are being accessed through dma-buf at the same time.
13200959a168SDaniel Vetter * To circumvent this problem there are begin/end coherency markers, that
13210959a168SDaniel Vetter * forward directly to existing dma-buf device drivers vfunc hooks. Userspace
13220959a168SDaniel Vetter * can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
13230959a168SDaniel Vetter * sequence would be used like following:
13240959a168SDaniel Vetter *
13250959a168SDaniel Vetter * - mmap dma-buf fd
13260959a168SDaniel Vetter * - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
13270959a168SDaniel Vetter * to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
13280959a168SDaniel Vetter * want (with the new data being consumed by say the GPU or the scanout
13290959a168SDaniel Vetter * device)
13300959a168SDaniel Vetter * - munmap once you don't need the buffer any more
13310959a168SDaniel Vetter *
13320959a168SDaniel Vetter * For correctness and optimal performance, it is always required to use
13330959a168SDaniel Vetter * SYNC_START and SYNC_END before and after, respectively, when accessing the
13340959a168SDaniel Vetter * mapped address. Userspace cannot rely on coherent access, even on systems
13350959a168SDaniel Vetter * where it appears to work without calling these ioctls.
13360959a168SDaniel Vetter *
13370959a168SDaniel Vetter * - And as a CPU fallback in userspace processing pipelines.
13380959a168SDaniel Vetter *
13390959a168SDaniel Vetter * Similar to the motivation for kernel cpu access it is again important that
13400959a168SDaniel Vetter * the userspace code of a given importing subsystem can use the same
13410959a168SDaniel Vetter * interfaces with an imported dma-buf buffer object as with a native buffer
13420959a168SDaniel Vetter * object. This is especially important for drm where the userspace part of
13430959a168SDaniel Vetter * contemporary OpenGL, X, and other drivers is huge, and reworking them to
13440959a168SDaniel Vetter * use a different way to mmap a buffer would be rather invasive.
13450959a168SDaniel Vetter *
134685804b70SDaniel Vetter * The assumption in the current dma-buf interfaces is that redirecting the
13470959a168SDaniel Vetter * initial mmap is all that's needed. A survey of some of the existing
13480959a168SDaniel Vetter * subsystems shows that no driver seems to do any nefarious thing like
13490959a168SDaniel Vetter * syncing up with outstanding asynchronous processing on the device or
13500959a168SDaniel Vetter * allocating special resources at fault time. So hopefully this is good
135185804b70SDaniel Vetter * enough, since adding interfaces to intercept pagefaults and allow pte
13520959a168SDaniel Vetter * shootdowns would increase the complexity quite a bit.
13530959a168SDaniel Vetter *
13540959a168SDaniel Vetter * Interface:
1355ae4e46b1SChris Wilson *
1356ae4e46b1SChris Wilson * .. code-block:: c
1357ae4e46b1SChris Wilson *
1358ae4e46b1SChris Wilson * int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *, unsigned long);
1359ae4e46b1SChris Wilson *
136052791eeeSChristian König * If the importing subsystem simply provides a special-purpose mmap call to
1361ae4e46b1SChris Wilson * set up a mapping in userspace, calling do_mmap with &dma_buf.file will
1362ae4e46b1SChris Wilson * equally achieve that for a dma-buf object.
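 *
 * A minimal userspace sketch of the mmap plus SYNC bracketing described
 * above (not part of this file; the dma-buf fd and the buffer size are
 * assumed to come from the exporting driver's own uAPI):
 *
 * .. code-block:: c
 *
 *     #include <errno.h>
 *     #include <string.h>
 *     #include <sys/ioctl.h>
 *     #include <sys/mman.h>
 *     #include <linux/dma-buf.h>
 *
 *     static int fill_buffer(int dmabuf_fd, size_t size, unsigned char value)
 *     {
 *             struct dma_buf_sync sync = { 0 };
 *             void *addr;
 *             int ret;
 *
 *             addr = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                         dmabuf_fd, 0);
 *             if (addr == MAP_FAILED)
 *                     return -errno;
 *
 *             // bracket the CPU write so caches are made coherent
 *             sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *             do {
 *                     ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *             } while (ret == -1 && (errno == EAGAIN || errno == EINTR));
 *
 *             memset(addr, value, size);
 *
 *             sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *             do {
 *                     ret = ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);
 *             } while (ret == -1 && (errno == EAGAIN || errno == EINTR));
 *
 *             munmap(addr, size);
 *             return 0;
 *     }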
1363ae4e46b1SChris Wilson */
13647bc80a54SChristian König
__dma_buf_begin_cpu_access(struct dma_buf * dmabuf,enum dma_data_direction direction)13657bc80a54SChristian König static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1366ae4e46b1SChris Wilson enum dma_data_direction direction)
1367ae4e46b1SChris Wilson {
1368ae4e46b1SChris Wilson bool write = (direction == DMA_BIDIRECTIONAL ||
1369ae4e46b1SChris Wilson direction == DMA_TO_DEVICE);
1370ae4e46b1SChris Wilson struct dma_resv *resv = dmabuf->resv;
1371fc13020eSDaniel Vetter long ret;
1372fc13020eSDaniel Vetter
1373fc13020eSDaniel Vetter /* Wait on any implicit rendering fences */
1374fc13020eSDaniel Vetter ret = dma_resv_wait_timeout(resv, dma_resv_usage_rw(write),
1375fc13020eSDaniel Vetter true, MAX_SCHEDULE_TIMEOUT);
1376fc13020eSDaniel Vetter if (ret < 0)
1377efb4df82SRandy Dunlap return ret;
1378b56ffa58ST.J. Mercier
1379fc13020eSDaniel Vetter return 0;
13800959a168SDaniel Vetter }
1381b56ffa58ST.J. Mercier
13820959a168SDaniel Vetter /**
13830959a168SDaniel Vetter * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
1384de9114ecSDaniel Vetter * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
1385de9114ecSDaniel Vetter * preparations. Coherency is only guaranteed in the specified range for the
1386de9114ecSDaniel Vetter * specified access direction.
1387de9114ecSDaniel Vetter * @dmabuf: [in] buffer to prepare cpu access for.
1388de9114ecSDaniel Vetter * @direction: [in] direction of access.
1389fc13020eSDaniel Vetter *
1390fc13020eSDaniel Vetter * After the cpu access is complete the caller should call
1391831e9da7STiago Vignatti * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
1392fc13020eSDaniel Vetter * it guaranteed to be coherent with other DMA access.
1393fc13020eSDaniel Vetter *
1394fc13020eSDaniel Vetter * This function will also wait for any DMA transactions tracked through
1395fc13020eSDaniel Vetter * implicit synchronization in &dma_buf.resv. For DMA transactions with explicit
1396fc13020eSDaniel Vetter * synchronization this function will only ensure cache coherency, callers must
1397fc13020eSDaniel Vetter * ensure synchronization with such DMA transactions on their own.
1398fc13020eSDaniel Vetter *
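 * A minimal kernel-side sketch of such bracketed access (illustrative only;
 * the vmap-based access in the middle is just one possible way for the
 * caller to touch the buffer):
 *
 * .. code-block:: c
 *
 *     struct iosys_map map;
 *
 *     ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *     if (ret)
 *             return ret;
 *
 *     ret = dma_buf_vmap_unlocked(dmabuf, &map);
 *     if (!ret) {
 *             // read via map.vaddr (check map.is_iomem in real code)
 *             dma_buf_vunmap_unlocked(dmabuf, &map);
 *     }
 *
 *     dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *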
13998ccf0a29SDaniel Vetter * Can return negative error values, returns 0 on success.
14008ccf0a29SDaniel Vetter */
dma_buf_begin_cpu_access(struct dma_buf * dmabuf,enum dma_data_direction direction)1401fc13020eSDaniel Vetter int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
1402831e9da7STiago Vignatti enum dma_data_direction direction)
1403fc13020eSDaniel Vetter {
1404ae4e46b1SChris Wilson int ret = 0;
1405ae4e46b1SChris Wilson
1406ae4e46b1SChris Wilson if (WARN_ON(!dmabuf))
1407ae4e46b1SChris Wilson return -EINVAL;
1408ae4e46b1SChris Wilson
1409ae4e46b1SChris Wilson might_lock(&dmabuf->resv->lock.base);
1410ae4e46b1SChris Wilson
1411fc13020eSDaniel Vetter if (dmabuf->ops->begin_cpu_access)
1412fc13020eSDaniel Vetter ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
141316b0314aSGreg Kroah-Hartman
1414fc13020eSDaniel Vetter /* Ensure that all fences are waited upon - but we first allow
1415fc13020eSDaniel Vetter * the native handler the chance to do so more efficiently if it
1416fc13020eSDaniel Vetter * chooses. A double invocation here will be a reasonably cheap no-op.
1417fc13020eSDaniel Vetter */
1418fc13020eSDaniel Vetter if (ret == 0)
1419fc13020eSDaniel Vetter ret = __dma_buf_begin_cpu_access(dmabuf, direction);
1420efb4df82SRandy Dunlap
1421b56ffa58ST.J. Mercier return ret;
1422fc13020eSDaniel Vetter }
14230959a168SDaniel Vetter EXPORT_SYMBOL_NS_GPL(dma_buf_begin_cpu_access, "DMA_BUF");
14240959a168SDaniel Vetter
142587e332d5SDaniel Vetter /**
1426fc13020eSDaniel Vetter * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
142718b862dcSChris Wilson * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
1428fc13020eSDaniel Vetter * actions. Coherency is only guaranteed in the specified range for the
1429fc13020eSDaniel Vetter * specified access direction.
143018b862dcSChris Wilson * @dmabuf: [in] buffer to complete cpu access for.
143118b862dcSChris Wilson * @direction: [in] direction of access.
1432fc13020eSDaniel Vetter *
1433fc13020eSDaniel Vetter * This terminates CPU access started with dma_buf_begin_cpu_access().
14348ccf0a29SDaniel Vetter *
14358ccf0a29SDaniel Vetter * Can return negative error values, returns 0 on success.
1436fc13020eSDaniel Vetter */
dma_buf_end_cpu_access(struct dma_buf * dmabuf,enum dma_data_direction direction)143718b862dcSChris Wilson int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
143818b862dcSChris Wilson enum dma_data_direction direction)
143918b862dcSChris Wilson {
1440fc13020eSDaniel Vetter int ret = 0;
144116b0314aSGreg Kroah-Hartman
1442fc13020eSDaniel Vetter WARN_ON(!dmabuf);
14434c78513eSDaniel Vetter
14444c78513eSDaniel Vetter might_lock(&dmabuf->resv->lock.base);
14454c78513eSDaniel Vetter
144612c4727eSSumit Semwal if (dmabuf->ops->end_cpu_access)
14474c78513eSDaniel Vetter ret = dmabuf->ops->end_cpu_access(dmabuf, direction);
14484c78513eSDaniel Vetter
14494c78513eSDaniel Vetter return ret;
14504c78513eSDaniel Vetter }
14514c78513eSDaniel Vetter EXPORT_SYMBOL_NS_GPL(dma_buf_end_cpu_access, "DMA_BUF");
1452ecf1dbacSJavier Martinez Canillas
14534c78513eSDaniel Vetter
14544c78513eSDaniel Vetter /**
14554c78513eSDaniel Vetter * dma_buf_mmap - Setup up a userspace mmap with the given vma
14564c78513eSDaniel Vetter * @dmabuf: [in] buffer that should back the vma
14574c78513eSDaniel Vetter * @vma: [in] vma for the mmap
14584c78513eSDaniel Vetter * @pgoff: [in] offset in pages where this mmap should start within the
14594c78513eSDaniel Vetter * dma-buf buffer.
14604c78513eSDaniel Vetter *
14614c78513eSDaniel Vetter * This function adjusts the passed in vma so that it points at the file of the
14624c78513eSDaniel Vetter * dma_buf. It also adjusts the starting pgoff and does bounds
14634c78513eSDaniel Vetter * checking on the size of the vma. Then it calls the exporter's mmap function to
1464e3a9d6c5SAndrew F. Davis * set up the mapping.
1465e3a9d6c5SAndrew F. Davis *
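 * A typical use is forwarding an importer's own mmap file operation to the
 * underlying dma-buf (sketch only; struct importer_object and its dmabuf
 * member are placeholders for wherever the importer keeps its dma_buf
 * pointer):
 *
 * .. code-block:: c
 *
 *     static int importer_mmap(struct file *file, struct vm_area_struct *vma)
 *     {
 *             struct importer_object *obj = file->private_data;
 *
 *             return dma_buf_mmap(obj->dmabuf, vma, 0);
 *     }
 *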
1466e3a9d6c5SAndrew F. Davis * Can return negative error values, returns 0 on success.
1467e3a9d6c5SAndrew F. Davis */
dma_buf_mmap(struct dma_buf * dmabuf,struct vm_area_struct * vma,unsigned long pgoff)14684c78513eSDaniel Vetter int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
1469b02da6f8SMuhammad Falak R Wani unsigned long pgoff)
14704c78513eSDaniel Vetter {
14714c78513eSDaniel Vetter if (WARN_ON(!dmabuf || !vma))
14724c78513eSDaniel Vetter return -EINVAL;
1473b02da6f8SMuhammad Falak R Wani
14744c78513eSDaniel Vetter /* check if buffer supports mmap */
14754c78513eSDaniel Vetter if (!dmabuf->ops->mmap)
14764c78513eSDaniel Vetter return -EINVAL;
14774c78513eSDaniel Vetter
1478295992fbSChristian König /* check for offset overflow */
14794c78513eSDaniel Vetter if (pgoff + vma_pages(vma) < pgoff)
14804c78513eSDaniel Vetter return -EOVERFLOW;
14818021fa16SDmitry Osipenko
14824c78513eSDaniel Vetter /* check for overflowing the buffer's size */
148316b0314aSGreg Kroah-Hartman if (pgoff + vma_pages(vma) >
148498f86c9eSDave Airlie dmabuf->size >> PAGE_SHIFT)
148598f86c9eSDave Airlie return -EINVAL;
148612c4727eSSumit Semwal
148712c4727eSSumit Semwal /* readjust the vma */
148812c4727eSSumit Semwal vma_set_file(vma, dmabuf->file);
14896619ccf1SThomas Zimmermann vma->vm_pgoff = pgoff;
149098f86c9eSDave Airlie
149198f86c9eSDave Airlie return dmabuf->ops->mmap(dmabuf, vma);
149298f86c9eSDave Airlie }
149398f86c9eSDave Airlie EXPORT_SYMBOL_NS_GPL(dma_buf_mmap, "DMA_BUF");
1494de9114ecSDaniel Vetter
1495de9114ecSDaniel Vetter /**
1496de9114ecSDaniel Vetter * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
1497de9114ecSDaniel Vetter * address space. Same restrictions as for vmap and friends apply.
1498fee0c54eSColin Cross * @dmabuf: [in] buffer to vmap
14996619ccf1SThomas Zimmermann * @map: [out] returns the vmap pointer
150098f86c9eSDave Airlie *
15017938f421SLucas De Marchi * This call may fail due to lack of virtual mapping address space.
150298f86c9eSDave Airlie * These calls are optional in drivers. The intended use for them
15037938f421SLucas De Marchi * is to map objects linearly into kernel address space for frequently used objects.
150428743e25SDmitry Osipenko *
15056619ccf1SThomas Zimmermann * To ensure coherency users must call dma_buf_begin_cpu_access() and
15067938f421SLucas De Marchi * dma_buf_end_cpu_access() around any cpu access performed through this
1507f00b4dadSDaniel Vetter * mapping.
150898f86c9eSDave Airlie *
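 * A minimal sketch for a caller that already holds the reservation lock,
 * e.g. inside an importer or exporter callback (illustrative only; the
 * cpu-access bracketing described above is omitted for brevity):
 *
 * .. code-block:: c
 *
 *     struct iosys_map map;
 *
 *     ret = dma_buf_vmap(dmabuf, &map);
 *     if (ret)
 *             return ret;
 *
 *     // access map.vaddr (or map.vaddr_iomem when map.is_iomem is set)
 *
 *     dma_buf_vunmap(dmabuf, &map);
 *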
15096619ccf1SThomas Zimmermann * Returns 0 on success, or a negative errno code otherwise.
151098f86c9eSDave Airlie */
dma_buf_vmap(struct dma_buf * dmabuf,struct iosys_map * map)151134c7797fSDmitry Osipenko int dma_buf_vmap(struct dma_buf *dmabuf, struct iosys_map *map)
151234c7797fSDmitry Osipenko {
1513f00b4dadSDaniel Vetter struct iosys_map ptr;
15146619ccf1SThomas Zimmermann int ret;
1515f00b4dadSDaniel Vetter
1516f00b4dadSDaniel Vetter iosys_map_clear(map);
1517f00b4dadSDaniel Vetter
15187938f421SLucas De Marchi if (WARN_ON(!dmabuf))
15196619ccf1SThomas Zimmermann return -EINVAL;
152028743e25SDmitry Osipenko
1521f00b4dadSDaniel Vetter dma_resv_assert_held(dmabuf->resv);
1522f00b4dadSDaniel Vetter
15237938f421SLucas De Marchi if (!dmabuf->ops->vmap)
1524f00b4dadSDaniel Vetter return -EINVAL;
15256619ccf1SThomas Zimmermann
15266619ccf1SThomas Zimmermann if (dmabuf->vmapping_counter) {
152728743e25SDmitry Osipenko dmabuf->vmapping_counter++;
1528f00b4dadSDaniel Vetter BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
15296619ccf1SThomas Zimmermann *map = dmabuf->vmap_ptr;
1530f00b4dadSDaniel Vetter return 0;
1531f00b4dadSDaniel Vetter }
15326619ccf1SThomas Zimmermann
15336619ccf1SThomas Zimmermann BUG_ON(iosys_map_is_set(&dmabuf->vmap_ptr));
153428743e25SDmitry Osipenko
153598f86c9eSDave Airlie ret = dmabuf->ops->vmap(dmabuf, &ptr);
153616b0314aSGreg Kroah-Hartman if (WARN_ON_ONCE(ret))
153798f86c9eSDave Airlie return ret;
153898f86c9eSDave Airlie
153956e5abbaSDmitry Osipenko dmabuf->vmap_ptr = ptr;
154056e5abbaSDmitry Osipenko dmabuf->vmapping_counter = 1;
154156e5abbaSDmitry Osipenko
154256e5abbaSDmitry Osipenko *map = dmabuf->vmap_ptr;
154356e5abbaSDmitry Osipenko
154456e5abbaSDmitry Osipenko return 0;
154556e5abbaSDmitry Osipenko }
154656e5abbaSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_vmap, "DMA_BUF");
154756e5abbaSDmitry Osipenko
154856e5abbaSDmitry Osipenko /**
154956e5abbaSDmitry Osipenko * dma_buf_vmap_unlocked - Create virtual mapping for the buffer object into kernel
155056e5abbaSDmitry Osipenko * address space. Same restrictions as for vmap and friends apply.
155156e5abbaSDmitry Osipenko * @dmabuf: [in] buffer to vmap
155256e5abbaSDmitry Osipenko * @map: [out] returns the vmap pointer
155356e5abbaSDmitry Osipenko *
155456e5abbaSDmitry Osipenko * Unlocked version of dma_buf_vmap()
155556e5abbaSDmitry Osipenko *
155656e5abbaSDmitry Osipenko * Returns 0 on success, or a negative errno code otherwise.
155756e5abbaSDmitry Osipenko */
dma_buf_vmap_unlocked(struct dma_buf * dmabuf,struct iosys_map * map)155856e5abbaSDmitry Osipenko int dma_buf_vmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
155956e5abbaSDmitry Osipenko {
156056e5abbaSDmitry Osipenko int ret;
156156e5abbaSDmitry Osipenko
156256e5abbaSDmitry Osipenko iosys_map_clear(map);
156356e5abbaSDmitry Osipenko
156456e5abbaSDmitry Osipenko if (WARN_ON(!dmabuf))
156556e5abbaSDmitry Osipenko return -EINVAL;
156698f86c9eSDave Airlie
156712c4727eSSumit Semwal dma_resv_lock(dmabuf->resv, NULL);
156820e76f1aSThomas Zimmermann ret = dma_buf_vmap(dmabuf, map);
156998f86c9eSDave Airlie dma_resv_unlock(dmabuf->resv);
15707938f421SLucas De Marchi
157198f86c9eSDave Airlie return ret;
157298f86c9eSDave Airlie }
157398f86c9eSDave Airlie EXPORT_SYMBOL_NS_GPL(dma_buf_vmap_unlocked, "DMA_BUF");
157498f86c9eSDave Airlie
157534c7797fSDmitry Osipenko /**
157634c7797fSDmitry Osipenko * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
15777938f421SLucas De Marchi * @dmabuf: [in] buffer to vunmap
1578f00b4dadSDaniel Vetter * @map: [in] vmap pointer to vunmap
15797938f421SLucas De Marchi */
dma_buf_vunmap(struct dma_buf * dmabuf,struct iosys_map * map)1580f00b4dadSDaniel Vetter void dma_buf_vunmap(struct dma_buf *dmabuf, struct iosys_map *map)
1581f00b4dadSDaniel Vetter {
158298f86c9eSDave Airlie if (WARN_ON(!dmabuf))
158320e76f1aSThomas Zimmermann return;
15847938f421SLucas De Marchi
1585f00b4dadSDaniel Vetter dma_resv_assert_held(dmabuf->resv);
158698f86c9eSDave Airlie
158716b0314aSGreg Kroah-Hartman BUG_ON(iosys_map_is_null(&dmabuf->vmap_ptr));
1588b89e3563SSumit Semwal BUG_ON(dmabuf->vmapping_counter == 0);
158956e5abbaSDmitry Osipenko BUG_ON(!iosys_map_is_equal(&dmabuf->vmap_ptr, map));
159056e5abbaSDmitry Osipenko
159156e5abbaSDmitry Osipenko if (--dmabuf->vmapping_counter == 0) {
159256e5abbaSDmitry Osipenko if (dmabuf->ops->vunmap)
159356e5abbaSDmitry Osipenko dmabuf->ops->vunmap(dmabuf, map);
159456e5abbaSDmitry Osipenko iosys_map_clear(&dmabuf->vmap_ptr);
159556e5abbaSDmitry Osipenko }
159656e5abbaSDmitry Osipenko }
159756e5abbaSDmitry Osipenko EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap, "DMA_BUF");
159856e5abbaSDmitry Osipenko
159956e5abbaSDmitry Osipenko /**
160056e5abbaSDmitry Osipenko * dma_buf_vunmap_unlocked - Unmap a vmap obtained by dma_buf_vmap.
160156e5abbaSDmitry Osipenko * @dmabuf: [in] buffer to vunmap
160256e5abbaSDmitry Osipenko * @map: [in] vmap pointer to vunmap
160356e5abbaSDmitry Osipenko */
dma_buf_vunmap_unlocked(struct dma_buf * dmabuf,struct iosys_map * map)160456e5abbaSDmitry Osipenko void dma_buf_vunmap_unlocked(struct dma_buf *dmabuf, struct iosys_map *map)
1605b89e3563SSumit Semwal {
1606eb0b947eSMathias Krause if (WARN_ON(!dmabuf))
1607b89e3563SSumit Semwal return;
1608b89e3563SSumit Semwal
1609b89e3563SSumit Semwal dma_resv_lock(dmabuf->resv, NULL);
161063639d01SChristian König dma_buf_vunmap(dmabuf, map);
1611b89e3563SSumit Semwal dma_resv_unlock(dmabuf->resv);
1612680753ddSChristian König }
1613b89e3563SSumit Semwal EXPORT_SYMBOL_NS_GPL(dma_buf_vunmap_unlocked, "DMA_BUF");
1614b89e3563SSumit Semwal
1615b89e3563SSumit Semwal #ifdef CONFIG_DEBUG_FS
dma_buf_debug_show(struct seq_file * s,void * unused)1616b89e3563SSumit Semwal static int dma_buf_debug_show(struct seq_file *s, void *unused)
1617b89e3563SSumit Semwal {
1618b89e3563SSumit Semwal struct dma_buf *buf_obj;
1619c0b00a52SSumit Semwal struct dma_buf_attachment *attach_obj;
16206c01aa13SYuanzheng Song int count = 0, attach_count;
1621ed63bb1dSGreg Hackmann size_t size = 0;
1622b89e3563SSumit Semwal int ret;
1623b89e3563SSumit Semwal
1624b89e3563SSumit Semwal ret = mutex_lock_interruptible(&dmabuf_list_mutex);
162515fd552dSChristian König
162615fd552dSChristian König if (ret)
1627f45f57ccSChristian König return ret;
1628b89e3563SSumit Semwal
16298c0fd126SGuangming Cao seq_puts(s, "\nDma-buf Objects:\n");
16308c0fd126SGuangming Cao seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1631bb2bb903SGreg Hackmann "size", "flags", "mode", "count", "ino");
1632c0b00a52SSumit Semwal
1633b89e3563SSumit Semwal list_for_each_entry(buf_obj, &dmabuf_list, list_node) {
1634a1f6dbacSAl Viro
1635ed63bb1dSGreg Hackmann ret = dma_resv_lock_interruptible(buf_obj->resv, NULL);
1636bb2bb903SGreg Hackmann if (ret)
16376c01aa13SYuanzheng Song goto error_unlock;
16388c0fd126SGuangming Cao
1639b89e3563SSumit Semwal
1640a25efb38SChristian König spin_lock(&buf_obj->name_lock);
16415eb2c72cSRussell King seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1642c0b00a52SSumit Semwal buf_obj->size,
1643b89e3563SSumit Semwal buf_obj->file->f_flags, buf_obj->file->f_mode,
1644b89e3563SSumit Semwal file_count(buf_obj->file),
1645b89e3563SSumit Semwal buf_obj->exp_name,
16469eddb41dSMarkus Elfring file_inode(buf_obj->file)->i_ino,
1647b89e3563SSumit Semwal buf_obj->name ?: "<none>");
1648b89e3563SSumit Semwal spin_unlock(&buf_obj->name_lock);
164915fd552dSChristian König
1650b89e3563SSumit Semwal dma_resv_describe(buf_obj->resv, s);
1651c0b00a52SSumit Semwal
1652b89e3563SSumit Semwal seq_puts(s, "\tAttached Devices:\n");
1653b89e3563SSumit Semwal attach_count = 0;
1654b89e3563SSumit Semwal
1655b89e3563SSumit Semwal list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
1656b89e3563SSumit Semwal seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1657b89e3563SSumit Semwal attach_count++;
1658b89e3563SSumit Semwal }
1659b89e3563SSumit Semwal dma_resv_unlock(buf_obj->resv);
1660b89e3563SSumit Semwal
1661b89e3563SSumit Semwal seq_printf(s, "Total %d devices attached\n\n",
166215fd552dSChristian König attach_count);
1663f45f57ccSChristian König
166415fd552dSChristian König count++;
166515fd552dSChristian König size += buf_obj->size;
1666b89e3563SSumit Semwal }
1667b89e3563SSumit Semwal
16682674305aSYangtao Li seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1669b89e3563SSumit Semwal
1670b89e3563SSumit Semwal mutex_unlock(&dmabuf_list_mutex);
1671b89e3563SSumit Semwal return 0;
1672b89e3563SSumit Semwal
1673b89e3563SSumit Semwal error_unlock:
1674bd3e2208SMathias Krause mutex_unlock(&dmabuf_list_mutex);
1675b89e3563SSumit Semwal return ret;
16765136629dSJagan Teki }
1677bd3e2208SMathias Krause
1678bd3e2208SMathias Krause DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1679bd3e2208SMathias Krause
16805136629dSJagan Teki static struct dentry *dma_buf_debugfs_dir;
1681bd3e2208SMathias Krause
dma_buf_init_debugfs(void)1682b89e3563SSumit Semwal static int dma_buf_init_debugfs(void)
1683bd3e2208SMathias Krause {
1684bd3e2208SMathias Krause struct dentry *d;
1685bd3e2208SMathias Krause int err = 0;
1686b89e3563SSumit Semwal
1687b7479990SMathias Krause d = debugfs_create_dir("dma_buf", NULL);
1688b7479990SMathias Krause if (IS_ERR(d))
1689bd3e2208SMathias Krause return PTR_ERR(d);
1690b7479990SMathias Krause
1691b89e3563SSumit Semwal dma_buf_debugfs_dir = d;
1692b89e3563SSumit Semwal
1693b89e3563SSumit Semwal d = debugfs_create_file("bufinfo", 0444, dma_buf_debugfs_dir,
1694b89e3563SSumit Semwal NULL, &dma_buf_debug_fops);
1695b89e3563SSumit Semwal if (IS_ERR(d)) {
1696b89e3563SSumit Semwal pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1697b89e3563SSumit Semwal debugfs_remove_recursive(dma_buf_debugfs_dir);
1698b89e3563SSumit Semwal dma_buf_debugfs_dir = NULL;
1699b89e3563SSumit Semwal err = PTR_ERR(d);
1700b89e3563SSumit Semwal }
1701b89e3563SSumit Semwal
1702b89e3563SSumit Semwal return err;
1703b89e3563SSumit Semwal }
1704b89e3563SSumit Semwal
dma_buf_uninit_debugfs(void)1705b89e3563SSumit Semwal static void dma_buf_uninit_debugfs(void)
1706b89e3563SSumit Semwal {
1707b89e3563SSumit Semwal debugfs_remove_recursive(dma_buf_debugfs_dir);
1708b89e3563SSumit Semwal }
1709b89e3563SSumit Semwal #else
dma_buf_init_debugfs(void)1710b89e3563SSumit Semwal static inline int dma_buf_init_debugfs(void)
1711bdb8d06dSHridya Valsaraju {
1712bdb8d06dSHridya Valsaraju return 0;
1713bdb8d06dSHridya Valsaraju }
dma_buf_uninit_debugfs(void)1714bdb8d06dSHridya Valsaraju static inline void dma_buf_uninit_debugfs(void)
1715bdb8d06dSHridya Valsaraju {
1716bdb8d06dSHridya Valsaraju }
1717ed63bb1dSGreg Hackmann #endif
1718ed63bb1dSGreg Hackmann
dma_buf_init(void)1719ed63bb1dSGreg Hackmann static int __init dma_buf_init(void)
1720ed63bb1dSGreg Hackmann {
1721b89e3563SSumit Semwal int ret;
1722b89e3563SSumit Semwal
1723b89e3563SSumit Semwal ret = dma_buf_init_sysfs_statistics();
1724b89e3563SSumit Semwal if (ret)
1725b89e3563SSumit Semwal return ret;
1726b89e3563SSumit Semwal
1727b89e3563SSumit Semwal dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1728b89e3563SSumit Semwal if (IS_ERR(dma_buf_mnt))
1729b89e3563SSumit Semwal return PTR_ERR(dma_buf_mnt);
1730b89e3563SSumit Semwal
1731ed63bb1dSGreg Hackmann dma_buf_init_debugfs();
1732bdb8d06dSHridya Valsaraju return 0;
1733b89e3563SSumit Semwal }
1734b89e3563SSumit Semwal subsys_initcall(dma_buf_init);
1735
dma_buf_deinit(void)1736 static void __exit dma_buf_deinit(void)
1737 {
1738 dma_buf_uninit_debugfs();
1739 kern_unmount(dma_buf_mnt);
1740 dma_buf_uninit_sysfs_statistics();
1741 }
1742 __exitcall(dma_buf_deinit);
1743