/* xref: /linux/drivers/dma-buf/dma-buf.c (revision bb2bb903042517b8fb17b2bc21e00512f2dcac01) */
/*
 * Framework for buffer objects that can be shared across devices/subsystems.
 *
 * Copyright(C) 2011 Linaro Limited. All rights reserved.
 * Author: Sumit Semwal <sumit.semwal@ti.com>
 *
 * Many thanks to linaro-mm-sig list, and especially
 * Arnd Bergmann <arnd@arndb.de>, Rob Clark <rob@ti.com> and
 * Daniel Vetter <daniel@ffwll.ch> for their support in creation and
 * refining of this idea.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/anon_inodes.h>
#include <linux/export.h>
#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/poll.h>
#include <linux/reservation.h>
#include <linux/mm.h>
#include <linux/mount.h>

#include <uapi/linux/dma-buf.h>
#include <uapi/linux/magic.h>

static inline int is_dma_buf_file(struct file *);

struct dma_buf_list {
	struct list_head head;
	struct mutex lock;
};

static struct dma_buf_list db_list;
static char *dmabuffs_dname(struct dentry *dentry, char *buffer, int buflen)
{
	struct dma_buf *dmabuf;
	char name[DMA_BUF_NAME_LEN];
	size_t ret = 0;

	dmabuf = dentry->d_fsdata;
	mutex_lock(&dmabuf->lock);
	if (dmabuf->name)
		ret = strlcpy(name, dmabuf->name, DMA_BUF_NAME_LEN);
	mutex_unlock(&dmabuf->lock);

	return dynamic_dname(dentry, buffer, buflen, "/%s:%s",
			     dentry->d_name.name, ret > 0 ? name : "");
}

static const struct dentry_operations dma_buf_dentry_ops = {
	.d_dname = dmabuffs_dname,
};

static struct vfsmount *dma_buf_mnt;

static struct dentry *dma_buf_fs_mount(struct file_system_type *fs_type,
		int flags, const char *name, void *data)
{
	return mount_pseudo(fs_type, "dmabuf:", NULL, &dma_buf_dentry_ops,
			DMA_BUF_MAGIC);
}

static struct file_system_type dma_buf_fs_type = {
	.name = "dmabuf",
	.mount = dma_buf_fs_mount,
	.kill_sb = kill_anon_super,
};

static int dma_buf_release(struct inode *inode, struct file *file)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	BUG_ON(dmabuf->vmapping_counter);

	/*
	 * Any fences that a dma-buf poll can wait on should be signaled
	 * before releasing dma-buf. This is the responsibility of each
	 * driver that uses the reservation objects.
	 *
	 * If you hit this BUG() it means someone dropped their ref to the
	 * dma-buf while still having pending operations on the buffer.
	 */
	BUG_ON(dmabuf->cb_shared.active || dmabuf->cb_excl.active);

	dmabuf->ops->release(dmabuf);

	mutex_lock(&db_list.lock);
	list_del(&dmabuf->list_node);
	mutex_unlock(&db_list.lock);

	if (dmabuf->resv == (struct reservation_object *)&dmabuf[1])
		reservation_object_fini(dmabuf->resv);

	module_put(dmabuf->owner);
	kfree(dmabuf);
	return 0;
}

static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
{
	struct dma_buf *dmabuf;

	if (!is_dma_buf_file(file))
		return -EINVAL;

	dmabuf = file->private_data;

	/* check if buffer supports mmap */
	if (!dmabuf->ops->mmap)
		return -EINVAL;

	/* check for overflowing the buffer's size */
	if (vma->vm_pgoff + vma_pages(vma) >
	    dmabuf->size >> PAGE_SHIFT)
		return -EINVAL;

	return dmabuf->ops->mmap(dmabuf, vma);
}

static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
{
	struct dma_buf *dmabuf;
	loff_t base;

	if (!is_dma_buf_file(file))
		return -EBADF;

	dmabuf = file->private_data;

	/* only support discovering the end of the buffer,
	   but also allow SEEK_SET to maintain the idiomatic
	   SEEK_END(0), SEEK_SET(0) pattern */
	if (whence == SEEK_END)
		base = dmabuf->size;
	else if (whence == SEEK_SET)
		base = 0;
	else
		return -EINVAL;

	if (offset != 0)
		return -EINVAL;

	return base + offset;
}

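/*
 * Example: a minimal userspace sketch (not part of this file's build) that
 * uses the llseek support above to discover a dma-buf's size. Only offset 0
 * with SEEK_END or SEEK_SET is accepted, so the size is the only thing that
 * can be queried this way. Error handling is elided.
 *
 *	#include <unistd.h>
 *
 *	static off_t dma_buf_size(int dmabuf_fd)
 *	{
 *		off_t size = lseek(dmabuf_fd, 0, SEEK_END);
 *
 *		lseek(dmabuf_fd, 0, SEEK_SET);	// rewind to offset 0
 *		return size;
 *	}
 */
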
/**
 * DOC: fence polling
 *
 * To support cross-device and cross-driver synchronization of buffer access
 * implicit fences (represented internally in the kernel with &struct dma_fence)
 * can be attached to a &dma_buf. The glue for that and a few related things are
 * provided in the &reservation_object structure.
 *
 * Userspace can query the state of these implicitly tracked fences using poll()
 * and related system calls:
 *
 * - Checking for EPOLLIN, i.e. read access, can be used to query the state of
 *   the most recent write or exclusive fence.
 *
 * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
 *   all attached fences, shared and exclusive ones.
 *
 * Note that this only signals the completion of the respective fences, i.e. the
 * DMA transfers are complete. Cache flushing and any other necessary
 * preparations before CPU access can begin still need to happen.
 */

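/*
 * Example: a minimal userspace sketch (not part of this file's build) that
 * waits for all pending access to a dma-buf to finish by polling for
 * EPOLLOUT, per the semantics described in the DOC block above. Error
 * handling is elided.
 *
 *	#include <poll.h>
 *
 *	static void wait_for_idle(int dmabuf_fd)
 *	{
 *		struct pollfd pfd = {
 *			.fd = dmabuf_fd,
 *			.events = POLLOUT,	// all fences, shared and exclusive
 *		};
 *
 *		poll(&pfd, 1, -1);	// block until every fence has signaled
 *	}
 */
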
static void dma_buf_poll_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct dma_buf_poll_cb_t *dcb = (struct dma_buf_poll_cb_t *)cb;
	unsigned long flags;

	spin_lock_irqsave(&dcb->poll->lock, flags);
	wake_up_locked_poll(dcb->poll, dcb->active);
	dcb->active = 0;
	spin_unlock_irqrestore(&dcb->poll->lock, flags);
}

static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv;
	struct reservation_object_list *fobj;
	struct dma_fence *fence_excl;
	__poll_t events;
	unsigned shared_count, seq;

	dmabuf = file->private_data;
	if (!dmabuf || !dmabuf->resv)
		return EPOLLERR;

	resv = dmabuf->resv;

	poll_wait(file, &dmabuf->poll, poll);

	events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
	if (!events)
		return 0;

retry:
	seq = read_seqcount_begin(&resv->seq);
	rcu_read_lock();

	fobj = rcu_dereference(resv->fence);
	if (fobj)
		shared_count = fobj->shared_count;
	else
		shared_count = 0;
	fence_excl = rcu_dereference(resv->fence_excl);
	if (read_seqcount_retry(&resv->seq, seq)) {
		rcu_read_unlock();
		goto retry;
	}

	if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
		__poll_t pevents = EPOLLIN;

		if (shared_count == 0)
			pevents |= EPOLLOUT;

		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active) {
			dcb->active |= pevents;
			events &= ~pevents;
		} else
			dcb->active = pevents;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (events & pevents) {
			if (!dma_fence_get_rcu(fence_excl)) {
				/* force a recheck */
				events &= ~pevents;
				dma_buf_poll_cb(NULL, &dcb->cb);
			} else if (!dma_fence_add_callback(fence_excl, &dcb->cb,
							   dma_buf_poll_cb)) {
				events &= ~pevents;
				dma_fence_put(fence_excl);
			} else {
				/*
				 * No callback queued, wake up any additional
				 * waiters.
				 */
				dma_fence_put(fence_excl);
				dma_buf_poll_cb(NULL, &dcb->cb);
			}
		}
	}

	if ((events & EPOLLOUT) && shared_count > 0) {
		struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
		int i;

		/* Only queue a new callback if no event has fired yet */
		spin_lock_irq(&dmabuf->poll.lock);
		if (dcb->active)
			events &= ~EPOLLOUT;
		else
			dcb->active = EPOLLOUT;
		spin_unlock_irq(&dmabuf->poll.lock);

		if (!(events & EPOLLOUT))
			goto out;

		for (i = 0; i < shared_count; ++i) {
			struct dma_fence *fence = rcu_dereference(fobj->shared[i]);

			if (!dma_fence_get_rcu(fence)) {
				/*
				 * fence refcount dropped to zero, this means
				 * that fobj has been freed
				 *
				 * call dma_buf_poll_cb and force a recheck!
				 */
				events &= ~EPOLLOUT;
				dma_buf_poll_cb(NULL, &dcb->cb);
				break;
			}
			if (!dma_fence_add_callback(fence, &dcb->cb,
						    dma_buf_poll_cb)) {
				dma_fence_put(fence);
				events &= ~EPOLLOUT;
				break;
			}
			dma_fence_put(fence);
		}

		/* No callback queued, wake up any additional waiters. */
		if (i == shared_count)
			dma_buf_poll_cb(NULL, &dcb->cb);
	}

out:
	rcu_read_unlock();
	return events;
}

/**
 * dma_buf_set_name - Set a name on a specific dma_buf to track its usage.
 * The name of the dma-buf buffer can only be set when the dma-buf is not
 * attached to any devices. It could theoretically support changing the
 * name of the dma-buf if the same piece of memory is used for multiple
 * purposes by different devices.
 *
 * @dmabuf: [in]     dmabuf buffer that will be renamed.
 * @buf:    [in]     A piece of userspace memory that contains the name of
 *                   the dma-buf.
 *
 * Returns 0 on success. If the dma-buf buffer is already attached to
 * devices, return -EBUSY.
 */
static long dma_buf_set_name(struct dma_buf *dmabuf, const char __user *buf)
{
	char *name = strndup_user(buf, DMA_BUF_NAME_LEN);
	long ret = 0;

	if (IS_ERR(name))
		return PTR_ERR(name);

	mutex_lock(&dmabuf->lock);
	if (!list_empty(&dmabuf->attachments)) {
		ret = -EBUSY;
		kfree(name);
		goto out_unlock;
	}
	kfree(dmabuf->name);
	dmabuf->name = name;

out_unlock:
	mutex_unlock(&dmabuf->lock);
	return ret;
}

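/*
 * Example: a minimal userspace sketch (not part of this file's build) that
 * attaches a debug name to a freshly exported dma-buf via DMA_BUF_SET_NAME.
 * This must happen before any device is attached, otherwise the ioctl fails
 * with -EBUSY. The name string is an arbitrary placeholder; error handling
 * is elided.
 *
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static void name_buffer(int dmabuf_fd)
 *	{
 *		ioctl(dmabuf_fd, DMA_BUF_SET_NAME, "camera-preview");
 *	}
 */
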
static long dma_buf_ioctl(struct file *file,
			  unsigned int cmd, unsigned long arg)
{
	struct dma_buf *dmabuf;
	struct dma_buf_sync sync;
	enum dma_data_direction direction;
	int ret;

	dmabuf = file->private_data;

	switch (cmd) {
	case DMA_BUF_IOCTL_SYNC:
		if (copy_from_user(&sync, (void __user *) arg, sizeof(sync)))
			return -EFAULT;

		if (sync.flags & ~DMA_BUF_SYNC_VALID_FLAGS_MASK)
			return -EINVAL;

		switch (sync.flags & DMA_BUF_SYNC_RW) {
		case DMA_BUF_SYNC_READ:
			direction = DMA_FROM_DEVICE;
			break;
		case DMA_BUF_SYNC_WRITE:
			direction = DMA_TO_DEVICE;
			break;
		case DMA_BUF_SYNC_RW:
			direction = DMA_BIDIRECTIONAL;
			break;
		default:
			return -EINVAL;
		}

		if (sync.flags & DMA_BUF_SYNC_END)
			ret = dma_buf_end_cpu_access(dmabuf, direction);
		else
			ret = dma_buf_begin_cpu_access(dmabuf, direction);

		return ret;

	case DMA_BUF_SET_NAME:
		return dma_buf_set_name(dmabuf, (const char __user *)arg);

	default:
		return -ENOTTY;
	}
}

static const struct file_operations dma_buf_fops = {
	.release	= dma_buf_release,
	.mmap		= dma_buf_mmap_internal,
	.llseek		= dma_buf_llseek,
	.poll		= dma_buf_poll,
	.unlocked_ioctl	= dma_buf_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= dma_buf_ioctl,
#endif
};

/*
 * is_dma_buf_file - Check if struct file* is associated with dma_buf
 */
static inline int is_dma_buf_file(struct file *file)
{
	return file->f_op == &dma_buf_fops;
}

static struct file *dma_buf_getfile(struct dma_buf *dmabuf, int flags)
{
	struct file *file;
	struct inode *inode = alloc_anon_inode(dma_buf_mnt->mnt_sb);

	if (IS_ERR(inode))
		return ERR_CAST(inode);

	inode->i_size = dmabuf->size;
	inode_set_bytes(inode, dmabuf->size);

	file = alloc_file_pseudo(inode, dma_buf_mnt, "dmabuf",
				 flags, &dma_buf_fops);
	if (IS_ERR(file))
		goto err_alloc_file;
	file->f_flags = flags & (O_ACCMODE | O_NONBLOCK);
	file->private_data = dmabuf;
	file->f_path.dentry->d_fsdata = dmabuf;

	return file;

err_alloc_file:
	iput(inode);
	return file;
}

/**
 * DOC: dma buf device access
 *
 * For device DMA access to a shared DMA buffer the usual sequence of operations
 * is fairly simple:
 *
 * 1. The exporter defines its exporter instance using
 *    DEFINE_DMA_BUF_EXPORT_INFO() and calls dma_buf_export() to wrap a private
 *    buffer object into a &dma_buf. It then exports that &dma_buf to userspace
 *    as a file descriptor by calling dma_buf_fd().
 *
 * 2. Userspace passes this file descriptor to all drivers it wants this buffer
 *    to share with: First the file descriptor is converted to a &dma_buf using
 *    dma_buf_get(). Then the buffer is attached to the device using
 *    dma_buf_attach().
 *
 *    Up to this stage the exporter is still free to migrate or reallocate the
 *    backing storage.
 *
 * 3. Once the buffer is attached to all devices, userspace can initiate DMA
 *    access to the shared buffer. In the kernel this is done by calling
 *    dma_buf_map_attachment() and dma_buf_unmap_attachment().
 *
 * 4. Once a driver is done with a shared buffer it needs to call
 *    dma_buf_detach() (after cleaning up any mappings) and then release the
 *    reference acquired with dma_buf_get() by calling dma_buf_put().
 *
 * For the detailed semantics exporters are expected to implement see
 * &dma_buf_ops. A sketch of the importer-side sequence follows this comment.
 */

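/*
 * Example: a minimal kernel-side sketch (not part of this file's build) of
 * the importer-side sequence from the DOC block above. The device pointer
 * and the fd are assumed to come from the importing driver; IS_ERR() checks
 * and error unwinding are elided for brevity.
 *
 *	static int example_import(struct device *dev, int fd)
 *	{
 *		struct dma_buf *dmabuf = dma_buf_get(fd);	// step 2: fd -> dma_buf
 *		struct dma_buf_attachment *attach;
 *		struct sg_table *sgt;
 *
 *		attach = dma_buf_attach(dmabuf, dev);		// step 2: attach device
 *		sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); // step 3
 *
 *		// ... program the device using the sg_table ...
 *
 *		dma_buf_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *		dma_buf_detach(dmabuf, attach);			// step 4: clean up
 *		dma_buf_put(dmabuf);				// step 4: drop ref
 *		return 0;
 *	}
 */
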
/**
 * dma_buf_export - Creates a new dma_buf, and associates an anon file
 * with this buffer, so it can be exported.
 * Also connects the allocator-specific data and ops to the buffer.
 * Additionally, provides a name string for the exporter; useful in debugging.
 *
 * @exp_info:	[in]	holds all the export related information provided
 *			by the exporter. see &struct dma_buf_export_info
 *			for further details.
 *
 * Returns, on success, a newly created dma_buf object, which wraps the
 * supplied private data and operations for dma_buf_ops. On missing ops or an
 * error in allocating the struct dma_buf, returns a negative error wrapped
 * into a pointer.
 *
 * For most cases the easiest way to create @exp_info is through the
 * %DEFINE_DMA_BUF_EXPORT_INFO macro.
 */
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
{
	struct dma_buf *dmabuf;
	struct reservation_object *resv = exp_info->resv;
	struct file *file;
	size_t alloc_size = sizeof(struct dma_buf);
	int ret;

	if (!exp_info->resv)
		alloc_size += sizeof(struct reservation_object);
	else
		/* prevent &dma_buf[1] == dma_buf->resv */
		alloc_size += 1;

	if (WARN_ON(!exp_info->priv
			  || !exp_info->ops
			  || !exp_info->ops->map_dma_buf
			  || !exp_info->ops->unmap_dma_buf
			  || !exp_info->ops->release)) {
		return ERR_PTR(-EINVAL);
	}

	if (!try_module_get(exp_info->owner))
		return ERR_PTR(-ENOENT);

	dmabuf = kzalloc(alloc_size, GFP_KERNEL);
	if (!dmabuf) {
		ret = -ENOMEM;
		goto err_module;
	}

	dmabuf->priv = exp_info->priv;
	dmabuf->ops = exp_info->ops;
	dmabuf->size = exp_info->size;
	dmabuf->exp_name = exp_info->exp_name;
	dmabuf->owner = exp_info->owner;
	init_waitqueue_head(&dmabuf->poll);
	dmabuf->cb_excl.poll = dmabuf->cb_shared.poll = &dmabuf->poll;
	dmabuf->cb_excl.active = dmabuf->cb_shared.active = 0;

	if (!resv) {
		resv = (struct reservation_object *)&dmabuf[1];
		reservation_object_init(resv);
	}
	dmabuf->resv = resv;

	file = dma_buf_getfile(dmabuf, exp_info->flags);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto err_dmabuf;
	}

	file->f_mode |= FMODE_LSEEK;
	dmabuf->file = file;

	mutex_init(&dmabuf->lock);
	INIT_LIST_HEAD(&dmabuf->attachments);

	mutex_lock(&db_list.lock);
	list_add(&dmabuf->list_node, &db_list.head);
	mutex_unlock(&db_list.lock);

	return dmabuf;

err_dmabuf:
	kfree(dmabuf);
err_module:
	module_put(exp_info->owner);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_export);

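/*
 * Example: a minimal exporter-side sketch (not part of this file's build)
 * showing the usual dma_buf_export() + dma_buf_fd() pairing. "my_buffer_ops"
 * and the priv object are placeholders for the exporting driver's own
 * implementation; error handling is elided.
 *
 *	static int example_export(void *priv, size_t size)
 *	{
 *		DEFINE_DMA_BUF_EXPORT_INFO(exp_info);
 *		struct dma_buf *dmabuf;
 *
 *		exp_info.ops = &my_buffer_ops;	// must provide map/unmap/release
 *		exp_info.size = size;
 *		exp_info.flags = O_RDWR;
 *		exp_info.priv = priv;
 *
 *		dmabuf = dma_buf_export(&exp_info);
 *		if (IS_ERR(dmabuf))
 *			return PTR_ERR(dmabuf);
 *
 *		return dma_buf_fd(dmabuf, O_CLOEXEC);	// hand the fd to userspace
 *	}
 */
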
/**
 * dma_buf_fd - returns a file descriptor for the given dma_buf
 * @dmabuf:	[in]	pointer to dma_buf for which fd is required.
 * @flags:	[in]	flags to give to fd
 *
 * On success, returns an associated 'fd'. Else, returns error.
 */
int dma_buf_fd(struct dma_buf *dmabuf, int flags)
{
	int fd;

	if (!dmabuf || !dmabuf->file)
		return -EINVAL;

	fd = get_unused_fd_flags(flags);
	if (fd < 0)
		return fd;

	fd_install(fd, dmabuf->file);

	return fd;
}
EXPORT_SYMBOL_GPL(dma_buf_fd);

/**
 * dma_buf_get - returns the dma_buf structure related to an fd
 * @fd:	[in]	fd associated with the dma_buf to be returned
 *
 * On success, returns the dma_buf structure associated with an fd; uses
 * file's refcounting done by fget to increase refcount. Returns ERR_PTR
 * otherwise.
 */
struct dma_buf *dma_buf_get(int fd)
{
	struct file *file;

	file = fget(fd);

	if (!file)
		return ERR_PTR(-EBADF);

	if (!is_dma_buf_file(file)) {
		fput(file);
		return ERR_PTR(-EINVAL);
	}

	return file->private_data;
}
EXPORT_SYMBOL_GPL(dma_buf_get);

/**
 * dma_buf_put - decreases refcount of the buffer
 * @dmabuf:	[in]	buffer to reduce refcount of
 *
 * Uses file's refcounting done implicitly by fput().
 *
 * If, as a result of this call, the refcount becomes 0, the 'release' file
 * operation related to this fd is called. It calls &dma_buf_ops.release vfunc
 * in turn, and frees the memory allocated for dmabuf when exported.
 */
void dma_buf_put(struct dma_buf *dmabuf)
{
	if (WARN_ON(!dmabuf || !dmabuf->file))
		return;

	fput(dmabuf->file);
}
EXPORT_SYMBOL_GPL(dma_buf_put);

/**
 * dma_buf_attach - Add the device to dma_buf's attachments list; optionally,
 * calls attach() of dma_buf_ops to allow device-specific attach functionality
 * @dmabuf:	[in]	buffer to attach device to.
 * @dev:	[in]	device to be attached.
 *
 * Returns struct dma_buf_attachment pointer for this attachment. Attachments
 * must be cleaned up by calling dma_buf_detach().
 *
 * Returns:
 *
 * A pointer to newly created &dma_buf_attachment on success, or a negative
 * error code wrapped into a pointer on failure.
 *
 * Note that this can fail if the backing storage of @dmabuf is in a place not
 * accessible to @dev, and cannot be moved to a more suitable place. This is
 * indicated with the error code -EBUSY.
 */
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
					  struct device *dev)
{
	struct dma_buf_attachment *attach;
	int ret;

	if (WARN_ON(!dmabuf || !dev))
		return ERR_PTR(-EINVAL);

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return ERR_PTR(-ENOMEM);

	attach->dev = dev;
	attach->dmabuf = dmabuf;

	mutex_lock(&dmabuf->lock);

	if (dmabuf->ops->attach) {
		ret = dmabuf->ops->attach(dmabuf, attach);
		if (ret)
			goto err_attach;
	}
	list_add(&attach->node, &dmabuf->attachments);

	mutex_unlock(&dmabuf->lock);

	return attach;

err_attach:
	kfree(attach);
	mutex_unlock(&dmabuf->lock);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);

/**
 * dma_buf_detach - Remove the given attachment from dmabuf's attachments list;
 * optionally calls detach() of dma_buf_ops for device-specific detach
 * @dmabuf:	[in]	buffer to detach from.
 * @attach:	[in]	attachment to be detached; is freed after this call.
 *
 * Clean up a device attachment obtained by calling dma_buf_attach().
 */
void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
	if (WARN_ON(!dmabuf || !attach))
		return;

	if (attach->sgt)
		dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);

	mutex_lock(&dmabuf->lock);
	list_del(&attach->node);
	if (dmabuf->ops->detach)
		dmabuf->ops->detach(dmabuf, attach);

	mutex_unlock(&dmabuf->lock);
	kfree(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_detach);

/**
 * dma_buf_map_attachment - Returns the scatterlist table of the attachment;
 * mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
 * dma_buf_ops.
 * @attach:	[in]	attachment whose scatterlist is to be returned
 * @direction:	[in]	direction of DMA transfer
 *
 * Returns the sg_table containing the scatterlist of the backing storage;
 * returns ERR_PTR on error. May return -EINTR if it is interrupted by a
 * signal.
 *
 * A mapping must be unmapped by using dma_buf_unmap_attachment(). Note that
 * the underlying backing storage is pinned for as long as a mapping exists,
 * therefore users/importers should not hold onto a mapping for undue amounts of
 * time.
 */
struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
					enum dma_data_direction direction)
{
	struct sg_table *sg_table;

	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf))
		return ERR_PTR(-EINVAL);

	if (attach->sgt) {
		/*
		 * Two mappings with different directions for the same
		 * attachment are not allowed.
		 */
		if (attach->dir != direction &&
		    attach->dir != DMA_BIDIRECTIONAL)
			return ERR_PTR(-EBUSY);

		return attach->sgt;
	}

	sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
	if (!sg_table)
		sg_table = ERR_PTR(-ENOMEM);

	if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
		attach->sgt = sg_table;
		attach->dir = direction;
	}

	return sg_table;
}
EXPORT_SYMBOL_GPL(dma_buf_map_attachment);

/**
 * dma_buf_unmap_attachment - unmaps and decreases usecount of the buffer; might
 * deallocate the associated scatterlist. Is a wrapper for unmap_dma_buf() of
 * dma_buf_ops.
 * @attach:	[in]	attachment to unmap buffer from
 * @sg_table:	[in]	scatterlist info of the buffer to unmap
 * @direction:	[in]	direction of DMA transfer
 *
 * This unmaps a DMA mapping for @attach obtained by dma_buf_map_attachment().
 */
void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
				struct sg_table *sg_table,
				enum dma_data_direction direction)
{
	might_sleep();

	if (WARN_ON(!attach || !attach->dmabuf || !sg_table))
		return;

	if (attach->sgt == sg_table)
		return;

	attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);

/**
 * DOC: cpu access
 *
 * There are multiple reasons for supporting CPU access to a dma buffer object:
 *
 * - Fallback operations in the kernel, for example when a device is connected
 *   over USB and the kernel needs to shuffle the data around first before
 *   sending it away. Cache coherency is handled by bracketing any transactions
 *   with calls to dma_buf_begin_cpu_access() and dma_buf_end_cpu_access().
 *
 *   To support dma_buf objects residing in highmem cpu access is page-based
 *   using an API similar to kmap. Accessing a dma_buf is done in aligned chunks
 *   of PAGE_SIZE size. Before accessing a chunk it needs to be mapped, which
 *   returns a pointer in kernel virtual address space. Afterwards the chunk
 *   needs to be unmapped again. There is no limit on how often a given chunk
 *   can be mapped and unmapped, i.e. the importer does not need to call
 *   begin_cpu_access again before mapping the same chunk again.
 *
 *   Interfaces::
 *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
 *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
 *
 *   Implementing the functions is optional for exporters and for importers all
 *   the restrictions of using kmap apply.
 *
 *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
 *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
 *   the partial chunks at the beginning and end but may return stale or bogus
 *   data outside of the range (in these partial chunks).
 *
 *   For some cases the overhead of kmap can be too high, so a vmap interface
 *   is introduced. This interface should be used very carefully, as vmalloc
 *   space is a limited resource on many architectures.
 *
 *   Interfaces::
 *      void \*dma_buf_vmap(struct dma_buf \*dmabuf)
 *      void dma_buf_vunmap(struct dma_buf \*dmabuf, void \*vaddr)
 *
 *   The vmap call can fail if there is no vmap support in the exporter, or if
 *   it runs out of vmalloc space. Fallback to kmap should be implemented. Note
 *   that the dma-buf layer keeps a reference count for all vmap access and
 *   calls down into the exporter's vmap function only when no vmapping exists,
 *   and only unmaps it once. Protection against concurrent vmap/vunmap calls is
 *   provided by taking the dma_buf->lock mutex.
 *
 * - For full compatibility on the importer side with existing userspace
 *   interfaces, which might already support mmap'ing buffers. This is needed in
 *   many processing pipelines (e.g. feeding a software rendered image into a
 *   hardware pipeline, thumbnail creation, snapshots, ...). Also, Android's ION
 *   framework already supported this and for DMA buffer file descriptors to
 *   replace ION buffers mmap support was needed.
 *
 *   There are no special interfaces, userspace simply calls mmap on the dma-buf
 *   fd. But like for CPU access there's a need to bracket the actual access,
 *   which is handled by the ioctl (DMA_BUF_IOCTL_SYNC). Note that
 *   DMA_BUF_IOCTL_SYNC can fail with -EAGAIN or -EINTR, in which case it must
 *   be restarted.
 *
 *   Some systems might need some sort of cache coherency management e.g. when
 *   CPU and GPU domains are being accessed through dma-buf at the same time.
 *   To circumvent this problem there are begin/end coherency markers, that
 *   forward directly to existing dma-buf device drivers' vfunc hooks. Userspace
 *   can make use of those markers through the DMA_BUF_IOCTL_SYNC ioctl. The
 *   sequence would be used like the following (see the sketch after this
 *   comment):
 *
 *     - mmap dma-buf fd
 *     - for each drawing/upload cycle in CPU 1. SYNC_START ioctl, 2. read/write
 *       to mmap area 3. SYNC_END ioctl. This can be repeated as often as you
 *       want (with the new data being consumed by say the GPU or the scanout
 *       device)
 *     - munmap once you don't need the buffer any more
 *
 *    For correctness and optimal performance, it is always required to use
 *    SYNC_START and SYNC_END before and after, respectively, when accessing the
 *    mapped address. Userspace cannot rely on coherent access, even when there
 *    are systems where it just works without calling these ioctls.
 *
 * - And as a CPU fallback in userspace processing pipelines.
 *
 *   Similar to the motivation for kernel cpu access it is again important that
 *   the userspace code of a given importing subsystem can use the same
 *   interfaces with an imported dma-buf buffer object as with a native buffer
 *   object. This is especially important for drm where the userspace part of
 *   contemporary OpenGL, X, and other drivers is huge, and reworking them to
 *   use a different way to mmap a buffer would be rather invasive.
 *
 *   The assumption in the current dma-buf interfaces is that redirecting the
 *   initial mmap is all that's needed. A survey of some of the existing
 *   subsystems shows that no driver seems to do any nefarious thing like
 *   syncing up with outstanding asynchronous processing on the device or
 *   allocating special resources at fault time. So hopefully this is good
 *   enough, since adding interfaces to intercept pagefaults and allow pte
 *   shootdowns would increase the complexity quite a bit.
 *
 *   Interface::
 *      int dma_buf_mmap(struct dma_buf \*, struct vm_area_struct \*,
 *		       unsigned long);
 *
 *   If the importing subsystem simply provides a special-purpose mmap call to
 *   set up a mapping in userspace, calling do_mmap with dma_buf->file will
 *   equally achieve that for a dma-buf object.
 */

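/*
 * Example: a minimal userspace sketch (not part of this file's build) of the
 * mmap + DMA_BUF_IOCTL_SYNC sequence from the DOC block above. Error
 * handling, including restarting the ioctl on -EAGAIN/-EINTR, is elided.
 *
 *	#include <string.h>
 *	#include <sys/mman.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/dma-buf.h>
 *
 *	static void fill_buffer(int dmabuf_fd, size_t size)
 *	{
 *		struct dma_buf_sync sync = { 0 };
 *		char *map = mmap(NULL, size, PROT_READ | PROT_WRITE,
 *				 MAP_SHARED, dmabuf_fd, 0);
 *
 *		sync.flags = DMA_BUF_SYNC_START | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// bracket begin
 *
 *		memset(map, 0, size);	// CPU access to the mapping
 *
 *		sync.flags = DMA_BUF_SYNC_END | DMA_BUF_SYNC_WRITE;
 *		ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync);	// bracket end
 *
 *		munmap(map, size);
 *	}
 */
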
static int __dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
				      enum dma_data_direction direction)
{
	bool write = (direction == DMA_BIDIRECTIONAL ||
		      direction == DMA_TO_DEVICE);
	struct reservation_object *resv = dmabuf->resv;
	long ret;

	/* Wait on any implicit rendering fences */
	ret = reservation_object_wait_timeout_rcu(resv, write, true,
						  MAX_SCHEDULE_TIMEOUT);
	if (ret < 0)
		return ret;

	return 0;
}

/**
 * dma_buf_begin_cpu_access - Must be called before accessing a dma_buf from the
 * cpu in the kernel context. Calls begin_cpu_access to allow exporter-specific
 * preparations. Coherency is only guaranteed for the specified access
 * direction.
 * @dmabuf:	[in]	buffer to prepare cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * After the cpu access is complete the caller should call
 * dma_buf_end_cpu_access(). Only when cpu access is bracketed by both calls is
 * it guaranteed to be coherent with other DMA access.
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
			     enum dma_data_direction direction)
{
	int ret = 0;

	if (WARN_ON(!dmabuf))
		return -EINVAL;

	if (dmabuf->ops->begin_cpu_access)
		ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);

	/* Ensure that all fences are waited upon - but we first allow
	 * the native handler the chance to do so more efficiently if it
	 * chooses. A double invocation here will be a reasonably cheap no-op.
	 */
	if (ret == 0)
		ret = __dma_buf_begin_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);

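/*
 * Example: a minimal kernel-side sketch (not part of this file's build) of
 * bracketed CPU access through the kmap interface. The importer reads one
 * page of the buffer; error handling is elided.
 *
 *	static void example_read_page(struct dma_buf *dmabuf, unsigned long pgnum)
 *	{
 *		void *vaddr;
 *
 *		dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *
 *		vaddr = dma_buf_kmap(dmabuf, pgnum);	// map one PAGE_SIZE chunk
 *		if (vaddr) {
 *			// ... read up to PAGE_SIZE bytes at vaddr ...
 *			dma_buf_kunmap(dmabuf, pgnum, vaddr);
 *		}
 *
 *		dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
 *	}
 */
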
/**
 * dma_buf_end_cpu_access - Must be called after accessing a dma_buf from the
 * cpu in the kernel context. Calls end_cpu_access to allow exporter-specific
 * actions. Coherency is only guaranteed for the specified access direction.
 * @dmabuf:	[in]	buffer to complete cpu access for.
 * @direction:	[in]	direction of the cpu access.
 *
 * This terminates CPU access started with dma_buf_begin_cpu_access().
 *
 * Can return negative error values, returns 0 on success.
 */
int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
			   enum dma_data_direction direction)
{
	int ret = 0;

	WARN_ON(!dmabuf);

	if (dmabuf->ops->end_cpu_access)
		ret = dmabuf->ops->end_cpu_access(dmabuf, direction);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);

977fc13020eSDaniel Vetter /**
978fc13020eSDaniel Vetter  * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
979fc13020eSDaniel Vetter  * same restrictions as for kmap and friends apply.
980efb4df82SRandy Dunlap  * @dmabuf:	[in]	buffer to map page from.
981fc13020eSDaniel Vetter  * @page_num:	[in]	page in PAGE_SIZE units to map.
982fc13020eSDaniel Vetter  *
983fc13020eSDaniel Vetter  * This call must always succeed, any necessary preparations that might fail
984fc13020eSDaniel Vetter  * need to be done in begin_cpu_access.
985fc13020eSDaniel Vetter  */
986fc13020eSDaniel Vetter void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
987fc13020eSDaniel Vetter {
988fc13020eSDaniel Vetter 	WARN_ON(!dmabuf);
989fc13020eSDaniel Vetter 
99009ea0dfbSGerd Hoffmann 	if (!dmabuf->ops->map)
99109ea0dfbSGerd Hoffmann 		return NULL;
992f9b67f00SLogan Gunthorpe 	return dmabuf->ops->map(dmabuf, page_num);
993fc13020eSDaniel Vetter }
994fc13020eSDaniel Vetter EXPORT_SYMBOL_GPL(dma_buf_kmap);
995fc13020eSDaniel Vetter 
996fc13020eSDaniel Vetter /**
997fc13020eSDaniel Vetter  * dma_buf_kunmap - Unmap a page obtained by dma_buf_kmap.
998efb4df82SRandy Dunlap  * @dmabuf:	[in]	buffer to unmap page from.
999fc13020eSDaniel Vetter  * @page_num:	[in]	page in PAGE_SIZE units to unmap.
1000fc13020eSDaniel Vetter  * @vaddr:	[in]	kernel space pointer obtained from dma_buf_kmap.
1001fc13020eSDaniel Vetter  *
1002fc13020eSDaniel Vetter  * This call must always succeed.
1003fc13020eSDaniel Vetter  */
1004fc13020eSDaniel Vetter void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
1005fc13020eSDaniel Vetter 		    void *vaddr)
1006fc13020eSDaniel Vetter {
1007fc13020eSDaniel Vetter 	WARN_ON(!dmabuf);
1008fc13020eSDaniel Vetter 
1009f9b67f00SLogan Gunthorpe 	if (dmabuf->ops->unmap)
1010f9b67f00SLogan Gunthorpe 		dmabuf->ops->unmap(dmabuf, page_num, vaddr);
1011fc13020eSDaniel Vetter }
1012fc13020eSDaniel Vetter EXPORT_SYMBOL_GPL(dma_buf_kunmap);
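
/*
 * Usage sketch (illustrative only, not part of this file): page-wise CPU
 * access with the kmap/kunmap pair above, inside a begin/end_cpu_access
 * bracket. The helper name my_clear_first_page() is an assumption for this
 * example.
 *
 *	static int my_clear_first_page(struct dma_buf *dmabuf)
 *	{
 *		void *vaddr;
 *		int err;
 *
 *		err = dma_buf_begin_cpu_access(dmabuf, DMA_TO_DEVICE);
 *		if (err)
 *			return err;
 *
 *		vaddr = dma_buf_kmap(dmabuf, 0);
 *		if (vaddr) {
 *			memset(vaddr, 0, PAGE_SIZE);
 *			dma_buf_kunmap(dmabuf, 0, vaddr);
 *		}
 *
 *		return dma_buf_end_cpu_access(dmabuf, DMA_TO_DEVICE);
 *	}
 */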
10134c78513eSDaniel Vetter 
10144c78513eSDaniel Vetter 
10154c78513eSDaniel Vetter /**
10164c78513eSDaniel Vetter  * dma_buf_mmap - Set up a userspace mmap with the given vma
101712c4727eSSumit Semwal  * @dmabuf:	[in]	buffer that should back the vma
10184c78513eSDaniel Vetter  * @vma:	[in]	vma for the mmap
10194c78513eSDaniel Vetter  * @pgoff:	[in]	offset in pages where this mmap should start within the
10204c78513eSDaniel Vetter  *			dma-buf buffer.
10214c78513eSDaniel Vetter  *
10224c78513eSDaniel Vetter  * This function adjusts the passed-in vma so that it points at the file of the
1023ecf1dbacSJavier Martinez Canillas  * dma_buf operation. It also adjusts the starting pgoff and does bounds
10244c78513eSDaniel Vetter  * checking on the size of the vma. Then it calls the exporter's mmap function
10254c78513eSDaniel Vetter  * to set up the mapping.
10264c78513eSDaniel Vetter  *
10274c78513eSDaniel Vetter  * Can return negative error values, returns 0 on success.
10284c78513eSDaniel Vetter  */
10294c78513eSDaniel Vetter int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
10304c78513eSDaniel Vetter 		 unsigned long pgoff)
10314c78513eSDaniel Vetter {
1032495c10ccSJohn Sheu 	struct file *oldfile;
1033495c10ccSJohn Sheu 	int ret;
1034495c10ccSJohn Sheu 
10354c78513eSDaniel Vetter 	if (WARN_ON(!dmabuf || !vma))
10364c78513eSDaniel Vetter 		return -EINVAL;
10374c78513eSDaniel Vetter 
1038e3a9d6c5SAndrew F. Davis 	/* check if buffer supports mmap */
1039e3a9d6c5SAndrew F. Davis 	if (!dmabuf->ops->mmap)
1040e3a9d6c5SAndrew F. Davis 		return -EINVAL;
1041e3a9d6c5SAndrew F. Davis 
10424c78513eSDaniel Vetter 	/* check for offset overflow */
1043b02da6f8SMuhammad Falak R Wani 	if (pgoff + vma_pages(vma) < pgoff)
10444c78513eSDaniel Vetter 		return -EOVERFLOW;
10454c78513eSDaniel Vetter 
10464c78513eSDaniel Vetter 	/* check for overflowing the buffer's size */
1047b02da6f8SMuhammad Falak R Wani 	if (pgoff + vma_pages(vma) >
10484c78513eSDaniel Vetter 	    dmabuf->size >> PAGE_SHIFT)
10494c78513eSDaniel Vetter 		return -EINVAL;
10504c78513eSDaniel Vetter 
10514c78513eSDaniel Vetter 	/* readjust the vma */
1052495c10ccSJohn Sheu 	get_file(dmabuf->file);
1053495c10ccSJohn Sheu 	oldfile = vma->vm_file;
1054495c10ccSJohn Sheu 	vma->vm_file = dmabuf->file;
10554c78513eSDaniel Vetter 	vma->vm_pgoff = pgoff;
10564c78513eSDaniel Vetter 
1057495c10ccSJohn Sheu 	ret = dmabuf->ops->mmap(dmabuf, vma);
1058495c10ccSJohn Sheu 	if (ret) {
1059495c10ccSJohn Sheu 		/* restore old parameters on failure */
1060495c10ccSJohn Sheu 		vma->vm_file = oldfile;
1061495c10ccSJohn Sheu 		fput(dmabuf->file);
1062495c10ccSJohn Sheu 	} else {
1063495c10ccSJohn Sheu 		if (oldfile)
1064495c10ccSJohn Sheu 			fput(oldfile);
1065495c10ccSJohn Sheu 	}
1066495c10ccSJohn Sheu 	return ret;
1067495c10ccSJohn Sheu 
10684c78513eSDaniel Vetter }
10694c78513eSDaniel Vetter EXPORT_SYMBOL_GPL(dma_buf_mmap);
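
/*
 * Usage sketch (illustrative only, not part of this file): a driver exposing
 * a buffer through its own char device can forward its mmap handler to
 * dma_buf_mmap(). The wrapper my_drv_mmap() and the way it looks up the
 * dma_buf from file->private_data are assumptions for this example.
 *
 *	static int my_drv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct dma_buf *dmabuf = file->private_data;
 *
 *		return dma_buf_mmap(dmabuf, vma, 0);
 *	}
 */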
107098f86c9eSDave Airlie 
107198f86c9eSDave Airlie /**
107212c4727eSSumit Semwal  * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
107312c4727eSSumit Semwal  * address space. Same restrictions as for vmap and friends apply.
107412c4727eSSumit Semwal  * @dmabuf:	[in]	buffer to vmap
107598f86c9eSDave Airlie  *
107698f86c9eSDave Airlie  * This call may fail due to lack of virtual mapping address space.
107798f86c9eSDave Airlie  * These calls are optional in drivers. Their intended use is to map
107898f86c9eSDave Airlie  * objects linearly into kernel address space for frequently accessed objects.
107998f86c9eSDave Airlie  * Please attempt to use kmap/kunmap before thinking about these interfaces.
1080fee0c54eSColin Cross  *
1081fee0c54eSColin Cross  * Returns NULL on error.
108298f86c9eSDave Airlie  */
108398f86c9eSDave Airlie void *dma_buf_vmap(struct dma_buf *dmabuf)
108498f86c9eSDave Airlie {
1085f00b4dadSDaniel Vetter 	void *ptr;
1086f00b4dadSDaniel Vetter 
108798f86c9eSDave Airlie 	if (WARN_ON(!dmabuf))
108898f86c9eSDave Airlie 		return NULL;
108998f86c9eSDave Airlie 
1090f00b4dadSDaniel Vetter 	if (!dmabuf->ops->vmap)
109198f86c9eSDave Airlie 		return NULL;
1092f00b4dadSDaniel Vetter 
1093f00b4dadSDaniel Vetter 	mutex_lock(&dmabuf->lock);
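	/* vmaps are refcounted: reuse the cached mapping if one already exists */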
1094f00b4dadSDaniel Vetter 	if (dmabuf->vmapping_counter) {
1095f00b4dadSDaniel Vetter 		dmabuf->vmapping_counter++;
1096f00b4dadSDaniel Vetter 		BUG_ON(!dmabuf->vmap_ptr);
1097f00b4dadSDaniel Vetter 		ptr = dmabuf->vmap_ptr;
1098f00b4dadSDaniel Vetter 		goto out_unlock;
1099f00b4dadSDaniel Vetter 	}
1100f00b4dadSDaniel Vetter 
1101f00b4dadSDaniel Vetter 	BUG_ON(dmabuf->vmap_ptr);
1102f00b4dadSDaniel Vetter 
1103f00b4dadSDaniel Vetter 	ptr = dmabuf->ops->vmap(dmabuf);
1104fee0c54eSColin Cross 	if (WARN_ON_ONCE(IS_ERR(ptr)))
1105fee0c54eSColin Cross 		ptr = NULL;
1106fee0c54eSColin Cross 	if (!ptr)
1107f00b4dadSDaniel Vetter 		goto out_unlock;
1108f00b4dadSDaniel Vetter 
1109f00b4dadSDaniel Vetter 	dmabuf->vmap_ptr = ptr;
1110f00b4dadSDaniel Vetter 	dmabuf->vmapping_counter = 1;
1111f00b4dadSDaniel Vetter 
1112f00b4dadSDaniel Vetter out_unlock:
1113f00b4dadSDaniel Vetter 	mutex_unlock(&dmabuf->lock);
1114f00b4dadSDaniel Vetter 	return ptr;
111598f86c9eSDave Airlie }
111698f86c9eSDave Airlie EXPORT_SYMBOL_GPL(dma_buf_vmap);
111798f86c9eSDave Airlie 
111898f86c9eSDave Airlie /**
111998f86c9eSDave Airlie  * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
112012c4727eSSumit Semwal  * @dmabuf:	[in]	buffer to vunmap
11216e7b4a59SRandy Dunlap  * @vaddr:	[in]	vmap pointer to vunmap
112298f86c9eSDave Airlie  */
112398f86c9eSDave Airlie void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
112498f86c9eSDave Airlie {
112598f86c9eSDave Airlie 	if (WARN_ON(!dmabuf))
112698f86c9eSDave Airlie 		return;
112798f86c9eSDave Airlie 
1128f00b4dadSDaniel Vetter 	BUG_ON(!dmabuf->vmap_ptr);
1129f00b4dadSDaniel Vetter 	BUG_ON(dmabuf->vmapping_counter == 0);
1130f00b4dadSDaniel Vetter 	BUG_ON(dmabuf->vmap_ptr != vaddr);
1131f00b4dadSDaniel Vetter 
1132f00b4dadSDaniel Vetter 	mutex_lock(&dmabuf->lock);
1133f00b4dadSDaniel Vetter 	if (--dmabuf->vmapping_counter == 0) {
113498f86c9eSDave Airlie 		if (dmabuf->ops->vunmap)
113598f86c9eSDave Airlie 			dmabuf->ops->vunmap(dmabuf, vaddr);
1136f00b4dadSDaniel Vetter 		dmabuf->vmap_ptr = NULL;
1137f00b4dadSDaniel Vetter 	}
1138f00b4dadSDaniel Vetter 	mutex_unlock(&dmabuf->lock);
113998f86c9eSDave Airlie }
114098f86c9eSDave Airlie EXPORT_SYMBOL_GPL(dma_buf_vunmap);
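
/*
 * Usage sketch (illustrative only, not part of this file): a contiguous
 * kernel mapping via the refcounted vmap/vunmap pair above. The helper name
 * my_sum_bytes() is an assumption for this example.
 *
 *	static u32 my_sum_bytes(struct dma_buf *dmabuf)
 *	{
 *		u8 *vaddr = dma_buf_vmap(dmabuf);
 *		u32 sum = 0;
 *		size_t i;
 *
 *		if (!vaddr)
 *			return 0;
 *		for (i = 0; i < dmabuf->size; i++)
 *			sum += vaddr[i];
 *		dma_buf_vunmap(dmabuf, vaddr);
 *		return sum;
 *	}
 */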
1141b89e3563SSumit Semwal 
1142b89e3563SSumit Semwal #ifdef CONFIG_DEBUG_FS
1143eb0b947eSMathias Krause static int dma_buf_debug_show(struct seq_file *s, void *unused)
1144b89e3563SSumit Semwal {
1145b89e3563SSumit Semwal 	int ret;
1146b89e3563SSumit Semwal 	struct dma_buf *buf_obj;
1147b89e3563SSumit Semwal 	struct dma_buf_attachment *attach_obj;
11485eb2c72cSRussell King 	struct reservation_object *robj;
11495eb2c72cSRussell King 	struct reservation_object_list *fobj;
11505eb2c72cSRussell King 	struct dma_fence *fence;
11515eb2c72cSRussell King 	unsigned int seq;
11525eb2c72cSRussell King 	int count = 0, attach_count, shared_count, i;
1153b89e3563SSumit Semwal 	size_t size = 0;
1154b89e3563SSumit Semwal 
1155b89e3563SSumit Semwal 	ret = mutex_lock_interruptible(&db_list.lock);
1156b89e3563SSumit Semwal 
1157b89e3563SSumit Semwal 	if (ret)
1158b89e3563SSumit Semwal 		return ret;
1159b89e3563SSumit Semwal 
1160c0b00a52SSumit Semwal 	seq_puts(s, "\nDma-buf Objects:\n");
1161ed63bb1dSGreg Hackmann 	seq_printf(s, "%-8s\t%-8s\t%-8s\t%-8s\texp_name\t%-8s\tname\n",
1162ed63bb1dSGreg Hackmann 		   "size", "flags", "mode", "count", "ino");
1163b89e3563SSumit Semwal 
1164b89e3563SSumit Semwal 	list_for_each_entry(buf_obj, &db_list.head, list_node) {
1165b89e3563SSumit Semwal 		ret = mutex_lock_interruptible(&buf_obj->lock);
1166b89e3563SSumit Semwal 
1167b89e3563SSumit Semwal 		if (ret) {
1168c0b00a52SSumit Semwal 			seq_puts(s,
1169b89e3563SSumit Semwal 				 "\tERROR locking buffer object: skipping\n");
1170b89e3563SSumit Semwal 			continue;
1171b89e3563SSumit Semwal 		}
1172b89e3563SSumit Semwal 
1173*bb2bb903SGreg Hackmann 		seq_printf(s, "%08zu\t%08x\t%08x\t%08ld\t%s\t%08lu\t%s\n",
1174c0b00a52SSumit Semwal 				buf_obj->size,
1175b89e3563SSumit Semwal 				buf_obj->file->f_flags, buf_obj->file->f_mode,
1176a1f6dbacSAl Viro 				file_count(buf_obj->file),
1177ed63bb1dSGreg Hackmann 				buf_obj->exp_name,
1178*bb2bb903SGreg Hackmann 				file_inode(buf_obj->file)->i_ino,
1179*bb2bb903SGreg Hackmann 				buf_obj->name ?: "");
1180b89e3563SSumit Semwal 
11815eb2c72cSRussell King 		robj = buf_obj->resv;
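		/*
		 * Lockless snapshot of the reservation object: retry until the
		 * shared fence list and the exclusive fence are read without a
		 * concurrent writer. On success the loop exits with
		 * rcu_read_lock() still held, covering the fence dereferences
		 * below.
		 */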
11825eb2c72cSRussell King 		while (true) {
11835eb2c72cSRussell King 			seq = read_seqcount_begin(&robj->seq);
11845eb2c72cSRussell King 			rcu_read_lock();
11855eb2c72cSRussell King 			fobj = rcu_dereference(robj->fence);
11865eb2c72cSRussell King 			shared_count = fobj ? fobj->shared_count : 0;
11875eb2c72cSRussell King 			fence = rcu_dereference(robj->fence_excl);
11885eb2c72cSRussell King 			if (!read_seqcount_retry(&robj->seq, seq))
11895eb2c72cSRussell King 				break;
11905eb2c72cSRussell King 			rcu_read_unlock();
11915eb2c72cSRussell King 		}
11925eb2c72cSRussell King 
11935eb2c72cSRussell King 		if (fence)
11945eb2c72cSRussell King 			seq_printf(s, "\tExclusive fence: %s %s %ssignalled\n",
11955eb2c72cSRussell King 				   fence->ops->get_driver_name(fence),
11965eb2c72cSRussell King 				   fence->ops->get_timeline_name(fence),
11975eb2c72cSRussell King 				   dma_fence_is_signaled(fence) ? "" : "un");
11985eb2c72cSRussell King 		for (i = 0; i < shared_count; i++) {
11995eb2c72cSRussell King 			fence = rcu_dereference(fobj->shared[i]);
12005eb2c72cSRussell King 			if (!dma_fence_get_rcu(fence))
12015eb2c72cSRussell King 				continue;
12025eb2c72cSRussell King 			seq_printf(s, "\tShared fence: %s %s %ssignalled\n",
12035eb2c72cSRussell King 				   fence->ops->get_driver_name(fence),
12045eb2c72cSRussell King 				   fence->ops->get_timeline_name(fence),
12055eb2c72cSRussell King 				   dma_fence_is_signaled(fence) ? "" : "un");
12065e383a97SJérôme Glisse 			dma_fence_put(fence);
12075eb2c72cSRussell King 		}
12085eb2c72cSRussell King 		rcu_read_unlock();
12095eb2c72cSRussell King 
1210c0b00a52SSumit Semwal 		seq_puts(s, "\tAttached Devices:\n");
1211b89e3563SSumit Semwal 		attach_count = 0;
1212b89e3563SSumit Semwal 
1213b89e3563SSumit Semwal 		list_for_each_entry(attach_obj, &buf_obj->attachments, node) {
12149eddb41dSMarkus Elfring 			seq_printf(s, "\t%s\n", dev_name(attach_obj->dev));
1215b89e3563SSumit Semwal 			attach_count++;
1216b89e3563SSumit Semwal 		}
1217b89e3563SSumit Semwal 
1218c0b00a52SSumit Semwal 		seq_printf(s, "Total %d devices attached\n\n",
1219b89e3563SSumit Semwal 				attach_count);
1220b89e3563SSumit Semwal 
1221b89e3563SSumit Semwal 		count++;
1222b89e3563SSumit Semwal 		size += buf_obj->size;
1223b89e3563SSumit Semwal 		mutex_unlock(&buf_obj->lock);
1224b89e3563SSumit Semwal 	}
1225b89e3563SSumit Semwal 
1226b89e3563SSumit Semwal 	seq_printf(s, "\nTotal %d objects, %zu bytes\n", count, size);
1227b89e3563SSumit Semwal 
1228b89e3563SSumit Semwal 	mutex_unlock(&db_list.lock);
1229b89e3563SSumit Semwal 	return 0;
1230b89e3563SSumit Semwal }
1231b89e3563SSumit Semwal 
12322674305aSYangtao Li DEFINE_SHOW_ATTRIBUTE(dma_buf_debug);
1233b89e3563SSumit Semwal 
1234b89e3563SSumit Semwal static struct dentry *dma_buf_debugfs_dir;
1235b89e3563SSumit Semwal 
1236b89e3563SSumit Semwal static int dma_buf_init_debugfs(void)
1237b89e3563SSumit Semwal {
1238bd3e2208SMathias Krause 	struct dentry *d;
1239b89e3563SSumit Semwal 	int err = 0;
12405136629dSJagan Teki 
1241bd3e2208SMathias Krause 	d = debugfs_create_dir("dma_buf", NULL);
1242bd3e2208SMathias Krause 	if (IS_ERR(d))
1243bd3e2208SMathias Krause 		return PTR_ERR(d);
12445136629dSJagan Teki 
1245bd3e2208SMathias Krause 	dma_buf_debugfs_dir = d;
1246b89e3563SSumit Semwal 
1247bd3e2208SMathias Krause 	d = debugfs_create_file("bufinfo", S_IRUGO, dma_buf_debugfs_dir,
1248bd3e2208SMathias Krause 				NULL, &dma_buf_debug_fops);
1249bd3e2208SMathias Krause 	if (IS_ERR(d)) {
1250b89e3563SSumit Semwal 		pr_debug("dma_buf: debugfs: failed to create node bufinfo\n");
1251b7479990SMathias Krause 		debugfs_remove_recursive(dma_buf_debugfs_dir);
1252b7479990SMathias Krause 		dma_buf_debugfs_dir = NULL;
1253bd3e2208SMathias Krause 		err = PTR_ERR(d);
1254b7479990SMathias Krause 	}
1255b89e3563SSumit Semwal 
1256b89e3563SSumit Semwal 	return err;
1257b89e3563SSumit Semwal }
1258b89e3563SSumit Semwal 
1259b89e3563SSumit Semwal static void dma_buf_uninit_debugfs(void)
1260b89e3563SSumit Semwal {
1261b89e3563SSumit Semwal 	debugfs_remove_recursive(dma_buf_debugfs_dir);
1262b89e3563SSumit Semwal }
1263b89e3563SSumit Semwal #else
1264b89e3563SSumit Semwal static inline int dma_buf_init_debugfs(void)
1265b89e3563SSumit Semwal {
1266b89e3563SSumit Semwal 	return 0;
1267b89e3563SSumit Semwal }
1268b89e3563SSumit Semwal static inline void dma_buf_uninit_debugfs(void)
1269b89e3563SSumit Semwal {
1270b89e3563SSumit Semwal }
1271b89e3563SSumit Semwal #endif
1272b89e3563SSumit Semwal 
1273b89e3563SSumit Semwal static int __init dma_buf_init(void)
1274b89e3563SSumit Semwal {
1275ed63bb1dSGreg Hackmann 	dma_buf_mnt = kern_mount(&dma_buf_fs_type);
1276ed63bb1dSGreg Hackmann 	if (IS_ERR(dma_buf_mnt))
1277ed63bb1dSGreg Hackmann 		return PTR_ERR(dma_buf_mnt);
1278ed63bb1dSGreg Hackmann 
1279b89e3563SSumit Semwal 	mutex_init(&db_list.lock);
1280b89e3563SSumit Semwal 	INIT_LIST_HEAD(&db_list.head);
1281b89e3563SSumit Semwal 	dma_buf_init_debugfs();
1282b89e3563SSumit Semwal 	return 0;
1283b89e3563SSumit Semwal }
1284b89e3563SSumit Semwal subsys_initcall(dma_buf_init);
1285b89e3563SSumit Semwal 
1286b89e3563SSumit Semwal static void __exit dma_buf_deinit(void)
1287b89e3563SSumit Semwal {
1288b89e3563SSumit Semwal 	dma_buf_uninit_debugfs();
1289ed63bb1dSGreg Hackmann 	kern_unmount(dma_buf_mnt);
1290b89e3563SSumit Semwal }
1291b89e3563SSumit Semwal __exitcall(dma_buf_deinit);
1292