xref: /linux/drivers/rapidio/devices/rio_mport_cdev.c (revision 2f4c53349961c8ca480193e47da4d44fdb8335a8)
1*2874c5fdSThomas Gleixner // SPDX-License-Identifier: GPL-2.0-or-later
2e8de3701SAlexandre Bounine /*
3e8de3701SAlexandre Bounine  * RapidIO mport character device
4e8de3701SAlexandre Bounine  *
5e8de3701SAlexandre Bounine  * Copyright 2014-2015 Integrated Device Technology, Inc.
6e8de3701SAlexandre Bounine  *    Alexandre Bounine <alexandre.bounine@idt.com>
7e8de3701SAlexandre Bounine  * Copyright 2014-2015 Prodrive Technologies
8e8de3701SAlexandre Bounine  *    Andre van Herk <andre.van.herk@prodrive-technologies.com>
9e8de3701SAlexandre Bounine  *    Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>
10e8de3701SAlexandre Bounine  * Copyright (C) 2014 Texas Instruments Incorporated
11e8de3701SAlexandre Bounine  *    Aurelien Jacquiot <a-jacquiot@ti.com>
12e8de3701SAlexandre Bounine  */
13e8de3701SAlexandre Bounine #include <linux/module.h>
14e8de3701SAlexandre Bounine #include <linux/kernel.h>
15e8de3701SAlexandre Bounine #include <linux/cdev.h>
16e8de3701SAlexandre Bounine #include <linux/ioctl.h>
17e8de3701SAlexandre Bounine #include <linux/uaccess.h>
18e8de3701SAlexandre Bounine #include <linux/list.h>
19e8de3701SAlexandre Bounine #include <linux/fs.h>
20e8de3701SAlexandre Bounine #include <linux/err.h>
21e8de3701SAlexandre Bounine #include <linux/net.h>
22e8de3701SAlexandre Bounine #include <linux/poll.h>
23e8de3701SAlexandre Bounine #include <linux/spinlock.h>
24e8de3701SAlexandre Bounine #include <linux/sched.h>
25e8de3701SAlexandre Bounine #include <linux/kfifo.h>
26e8de3701SAlexandre Bounine 
27e8de3701SAlexandre Bounine #include <linux/mm.h>
28e8de3701SAlexandre Bounine #include <linux/slab.h>
29e8de3701SAlexandre Bounine #include <linux/vmalloc.h>
30e8de3701SAlexandre Bounine #include <linux/mman.h>
31e8de3701SAlexandre Bounine 
32e8de3701SAlexandre Bounine #include <linux/dma-mapping.h>
33e8de3701SAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
34e8de3701SAlexandre Bounine #include <linux/dmaengine.h>
35e8de3701SAlexandre Bounine #endif
36e8de3701SAlexandre Bounine 
37e8de3701SAlexandre Bounine #include <linux/rio.h>
38e8de3701SAlexandre Bounine #include <linux/rio_ids.h>
39e8de3701SAlexandre Bounine #include <linux/rio_drv.h>
40e8de3701SAlexandre Bounine #include <linux/rio_mport_cdev.h>
41e8de3701SAlexandre Bounine 
42e8de3701SAlexandre Bounine #include "../rio.h"
43e8de3701SAlexandre Bounine 
44e8de3701SAlexandre Bounine #define DRV_NAME	"rio_mport"
45e8de3701SAlexandre Bounine #define DRV_PREFIX	DRV_NAME ": "
46e8de3701SAlexandre Bounine #define DEV_NAME	"rio_mport"
47e8de3701SAlexandre Bounine #define DRV_VERSION     "1.0.0"
48e8de3701SAlexandre Bounine 
49e8de3701SAlexandre Bounine /* Debug output filtering masks */
50e8de3701SAlexandre Bounine enum {
51e8de3701SAlexandre Bounine 	DBG_NONE	= 0,
52e8de3701SAlexandre Bounine 	DBG_INIT	= BIT(0), /* driver init */
53e8de3701SAlexandre Bounine 	DBG_EXIT	= BIT(1), /* driver exit */
54e8de3701SAlexandre Bounine 	DBG_MPORT	= BIT(2), /* mport add/remove */
55e8de3701SAlexandre Bounine 	DBG_RDEV	= BIT(3), /* RapidIO device add/remove */
56e8de3701SAlexandre Bounine 	DBG_DMA		= BIT(4), /* DMA transfer messages */
57e8de3701SAlexandre Bounine 	DBG_MMAP	= BIT(5), /* mapping messages */
58e8de3701SAlexandre Bounine 	DBG_IBW		= BIT(6), /* inbound window */
59e8de3701SAlexandre Bounine 	DBG_EVENT	= BIT(7), /* event handling messages */
60e8de3701SAlexandre Bounine 	DBG_OBW		= BIT(8), /* outbound window messages */
61e8de3701SAlexandre Bounine 	DBG_DBELL	= BIT(9), /* doorbell messages */
62e8de3701SAlexandre Bounine 	DBG_ALL		= ~0,
63e8de3701SAlexandre Bounine };
64e8de3701SAlexandre Bounine 
/*
 * rmcd_debug(level, fmt, ...) prints only when the matching DBG_* bit is
 * set in the dbg_level module parameter; with DEBUG off it degrades to
 * no_printk() so format arguments are still type-checked at compile time.
 */
#ifdef DEBUG
#define rmcd_debug(level, fmt, arg...)		\
	do {					\
		if (DBG_##level & dbg_level)	\
			pr_debug(DRV_PREFIX "%s: " fmt "\n", __func__, ##arg); \
	} while (0)
#else
#define rmcd_debug(level, fmt, arg...) \
		no_printk(KERN_DEBUG pr_fmt(DRV_PREFIX fmt "\n"), ##arg)
#endif

/* Unconditional warning/error helpers tagged with the calling function */
#define rmcd_warn(fmt, arg...) \
	pr_warn(DRV_PREFIX "%s WARNING " fmt "\n", __func__, ##arg)

#define rmcd_error(fmt, arg...) \
	pr_err(DRV_PREFIX "%s ERROR " fmt "\n", __func__, ##arg)
81e8de3701SAlexandre Bounine 
82e8de3701SAlexandre Bounine MODULE_AUTHOR("Jerry Jacobs <jerry.jacobs@prodrive-technologies.com>");
83e8de3701SAlexandre Bounine MODULE_AUTHOR("Aurelien Jacquiot <a-jacquiot@ti.com>");
84e8de3701SAlexandre Bounine MODULE_AUTHOR("Alexandre Bounine <alexandre.bounine@idt.com>");
85e8de3701SAlexandre Bounine MODULE_AUTHOR("Andre van Herk <andre.van.herk@prodrive-technologies.com>");
86e8de3701SAlexandre Bounine MODULE_DESCRIPTION("RapidIO mport character device driver");
87e8de3701SAlexandre Bounine MODULE_LICENSE("GPL");
88e8de3701SAlexandre Bounine MODULE_VERSION(DRV_VERSION);
89e8de3701SAlexandre Bounine 
90e8de3701SAlexandre Bounine static int dma_timeout = 3000; /* DMA transfer timeout in msec */
91e8de3701SAlexandre Bounine module_param(dma_timeout, int, S_IRUGO);
92e8de3701SAlexandre Bounine MODULE_PARM_DESC(dma_timeout, "DMA Transfer Timeout in msec (default: 3000)");
93e8de3701SAlexandre Bounine 
94e8de3701SAlexandre Bounine #ifdef DEBUG
95e8de3701SAlexandre Bounine static u32 dbg_level = DBG_NONE;
96e8de3701SAlexandre Bounine module_param(dbg_level, uint, S_IWUSR | S_IWGRP | S_IRUGO);
97e8de3701SAlexandre Bounine MODULE_PARM_DESC(dbg_level, "Debugging output level (default 0 = none)");
98e8de3701SAlexandre Bounine #endif
99e8de3701SAlexandre Bounine 
100e8de3701SAlexandre Bounine /*
101e8de3701SAlexandre Bounine  * An internal DMA coherent buffer
102e8de3701SAlexandre Bounine  */
103e8de3701SAlexandre Bounine struct mport_dma_buf {
104e8de3701SAlexandre Bounine 	void		*ib_base;
105e8de3701SAlexandre Bounine 	dma_addr_t	ib_phys;
106e8de3701SAlexandre Bounine 	u32		ib_size;
107e8de3701SAlexandre Bounine 	u64		ib_rio_base;
108e8de3701SAlexandre Bounine 	bool		ib_map;
109e8de3701SAlexandre Bounine 	struct file	*filp;
110e8de3701SAlexandre Bounine };
111e8de3701SAlexandre Bounine 
/*
 * Direction/kind of an internal memory mapping (see struct
 * rio_mport_mapping).
 */
enum rio_mport_map_dir {
	MAP_INBOUND,
	MAP_OUTBOUND,
	MAP_DMA,
};
120e8de3701SAlexandre Bounine 
121e8de3701SAlexandre Bounine struct rio_mport_mapping {
122e8de3701SAlexandre Bounine 	struct list_head node;
123e8de3701SAlexandre Bounine 	struct mport_dev *md;
124e8de3701SAlexandre Bounine 	enum rio_mport_map_dir dir;
1254e1016daSAlexandre Bounine 	u16 rioid;
126e8de3701SAlexandre Bounine 	u64 rio_addr;
127e8de3701SAlexandre Bounine 	dma_addr_t phys_addr; /* for mmap */
128e8de3701SAlexandre Bounine 	void *virt_addr; /* kernel address, for dma_free_coherent */
129e8de3701SAlexandre Bounine 	u64 size;
130e8de3701SAlexandre Bounine 	struct kref ref; /* refcount of vmas sharing the mapping */
131e8de3701SAlexandre Bounine 	struct file *filp;
132e8de3701SAlexandre Bounine };
133e8de3701SAlexandre Bounine 
134e8de3701SAlexandre Bounine struct rio_mport_dma_map {
135e8de3701SAlexandre Bounine 	int valid;
1364e1016daSAlexandre Bounine 	u64 length;
137e8de3701SAlexandre Bounine 	void *vaddr;
138e8de3701SAlexandre Bounine 	dma_addr_t paddr;
139e8de3701SAlexandre Bounine };
140e8de3701SAlexandre Bounine 
141e8de3701SAlexandre Bounine #define MPORT_MAX_DMA_BUFS	16
142e8de3701SAlexandre Bounine #define MPORT_EVENT_DEPTH	10
143e8de3701SAlexandre Bounine 
144e8de3701SAlexandre Bounine /*
145e8de3701SAlexandre Bounine  * mport_dev  driver-specific structure that represents mport device
146e8de3701SAlexandre Bounine  * @active    mport device status flag
147e8de3701SAlexandre Bounine  * @node      list node to maintain list of registered mports
148e8de3701SAlexandre Bounine  * @cdev      character device
149e8de3701SAlexandre Bounine  * @dev       associated device object
150e8de3701SAlexandre Bounine  * @mport     associated subsystem's master port device object
151e8de3701SAlexandre Bounine  * @buf_mutex lock for buffer handling
152e8de3701SAlexandre Bounine  * @file_mutex - lock for open files list
153e8de3701SAlexandre Bounine  * @file_list  - list of open files on given mport
154e8de3701SAlexandre Bounine  * @properties properties of this mport
155e8de3701SAlexandre Bounine  * @portwrites queue of inbound portwrites
156e8de3701SAlexandre Bounine  * @pw_lock    lock for port write queue
157e8de3701SAlexandre Bounine  * @mappings   queue for memory mappings
158e8de3701SAlexandre Bounine  * @dma_chan   DMA channels associated with this device
159e8de3701SAlexandre Bounine  * @dma_ref:
160e8de3701SAlexandre Bounine  * @comp:
161e8de3701SAlexandre Bounine  */
162e8de3701SAlexandre Bounine struct mport_dev {
163e8de3701SAlexandre Bounine 	atomic_t		active;
164e8de3701SAlexandre Bounine 	struct list_head	node;
165e8de3701SAlexandre Bounine 	struct cdev		cdev;
166e8de3701SAlexandre Bounine 	struct device		dev;
167e8de3701SAlexandre Bounine 	struct rio_mport	*mport;
168e8de3701SAlexandre Bounine 	struct mutex		buf_mutex;
169e8de3701SAlexandre Bounine 	struct mutex		file_mutex;
170e8de3701SAlexandre Bounine 	struct list_head	file_list;
171e8de3701SAlexandre Bounine 	struct rio_mport_properties	properties;
172e8de3701SAlexandre Bounine 	struct list_head		doorbells;
173e8de3701SAlexandre Bounine 	spinlock_t			db_lock;
174e8de3701SAlexandre Bounine 	struct list_head		portwrites;
175e8de3701SAlexandre Bounine 	spinlock_t			pw_lock;
176e8de3701SAlexandre Bounine 	struct list_head	mappings;
177e8de3701SAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
178e8de3701SAlexandre Bounine 	struct dma_chan *dma_chan;
179e8de3701SAlexandre Bounine 	struct kref	dma_ref;
180e8de3701SAlexandre Bounine 	struct completion comp;
181e8de3701SAlexandre Bounine #endif
182e8de3701SAlexandre Bounine };
183e8de3701SAlexandre Bounine 
184e8de3701SAlexandre Bounine /*
185e8de3701SAlexandre Bounine  * mport_cdev_priv - data structure specific to individual file object
186e8de3701SAlexandre Bounine  *                   associated with an open device
187e8de3701SAlexandre Bounine  * @md    master port character device object
188e8de3701SAlexandre Bounine  * @async_queue - asynchronous notification queue
189e8de3701SAlexandre Bounine  * @list - file objects tracking list
190e8de3701SAlexandre Bounine  * @db_filters    inbound doorbell filters for this descriptor
191e8de3701SAlexandre Bounine  * @pw_filters    portwrite filters for this descriptor
192e8de3701SAlexandre Bounine  * @event_fifo    event fifo for this descriptor
193e8de3701SAlexandre Bounine  * @event_rx_wait wait queue for this descriptor
194e8de3701SAlexandre Bounine  * @fifo_lock     lock for event_fifo
195e8de3701SAlexandre Bounine  * @event_mask    event mask for this descriptor
196e8de3701SAlexandre Bounine  * @dmach DMA engine channel allocated for specific file object
197e8de3701SAlexandre Bounine  */
198e8de3701SAlexandre Bounine struct mport_cdev_priv {
199e8de3701SAlexandre Bounine 	struct mport_dev	*md;
200e8de3701SAlexandre Bounine 	struct fasync_struct	*async_queue;
201e8de3701SAlexandre Bounine 	struct list_head	list;
202e8de3701SAlexandre Bounine 	struct list_head	db_filters;
203e8de3701SAlexandre Bounine 	struct list_head        pw_filters;
204e8de3701SAlexandre Bounine 	struct kfifo            event_fifo;
205e8de3701SAlexandre Bounine 	wait_queue_head_t       event_rx_wait;
206e8de3701SAlexandre Bounine 	spinlock_t              fifo_lock;
2074e1016daSAlexandre Bounine 	u32			event_mask; /* RIO_DOORBELL, RIO_PORTWRITE */
208e8de3701SAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
209e8de3701SAlexandre Bounine 	struct dma_chan		*dmach;
210e8de3701SAlexandre Bounine 	struct list_head	async_list;
211e8de3701SAlexandre Bounine 	spinlock_t              req_lock;
212e8de3701SAlexandre Bounine 	struct mutex		dma_lock;
213e8de3701SAlexandre Bounine 	struct kref		dma_ref;
214e8de3701SAlexandre Bounine 	struct completion	comp;
215e8de3701SAlexandre Bounine #endif
216e8de3701SAlexandre Bounine };
217e8de3701SAlexandre Bounine 
218e8de3701SAlexandre Bounine /*
219e8de3701SAlexandre Bounine  * rio_mport_pw_filter - structure to describe a portwrite filter
220e8de3701SAlexandre Bounine  * md_node   node in mport device's list
221e8de3701SAlexandre Bounine  * priv_node node in private file object's list
222e8de3701SAlexandre Bounine  * priv      reference to private data
223e8de3701SAlexandre Bounine  * filter    actual portwrite filter
224e8de3701SAlexandre Bounine  */
225e8de3701SAlexandre Bounine struct rio_mport_pw_filter {
226e8de3701SAlexandre Bounine 	struct list_head md_node;
227e8de3701SAlexandre Bounine 	struct list_head priv_node;
228e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
229e8de3701SAlexandre Bounine 	struct rio_pw_filter filter;
230e8de3701SAlexandre Bounine };
231e8de3701SAlexandre Bounine 
232e8de3701SAlexandre Bounine /*
233e8de3701SAlexandre Bounine  * rio_mport_db_filter - structure to describe a doorbell filter
234e8de3701SAlexandre Bounine  * @data_node reference to device node
235e8de3701SAlexandre Bounine  * @priv_node node in private data
236e8de3701SAlexandre Bounine  * @priv      reference to private data
237e8de3701SAlexandre Bounine  * @filter    actual doorbell filter
238e8de3701SAlexandre Bounine  */
239e8de3701SAlexandre Bounine struct rio_mport_db_filter {
240e8de3701SAlexandre Bounine 	struct list_head data_node;
241e8de3701SAlexandre Bounine 	struct list_head priv_node;
242e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
243e8de3701SAlexandre Bounine 	struct rio_doorbell_filter filter;
244e8de3701SAlexandre Bounine };
245e8de3701SAlexandre Bounine 
246e8de3701SAlexandre Bounine static LIST_HEAD(mport_devs);
247e8de3701SAlexandre Bounine static DEFINE_MUTEX(mport_devs_lock);
248e8de3701SAlexandre Bounine 
249e8de3701SAlexandre Bounine #if (0) /* used by commented out portion of poll function : FIXME */
250e8de3701SAlexandre Bounine static DECLARE_WAIT_QUEUE_HEAD(mport_cdev_wait);
251e8de3701SAlexandre Bounine #endif
252e8de3701SAlexandre Bounine 
253e8de3701SAlexandre Bounine static struct class *dev_class;
254e8de3701SAlexandre Bounine static dev_t dev_number;
255e8de3701SAlexandre Bounine 
256e8de3701SAlexandre Bounine static void mport_release_mapping(struct kref *ref);
257e8de3701SAlexandre Bounine 
258e8de3701SAlexandre Bounine static int rio_mport_maint_rd(struct mport_cdev_priv *priv, void __user *arg,
259e8de3701SAlexandre Bounine 			      int local)
260e8de3701SAlexandre Bounine {
261e8de3701SAlexandre Bounine 	struct rio_mport *mport = priv->md->mport;
262e8de3701SAlexandre Bounine 	struct rio_mport_maint_io maint_io;
263e8de3701SAlexandre Bounine 	u32 *buffer;
264e8de3701SAlexandre Bounine 	u32 offset;
265e8de3701SAlexandre Bounine 	size_t length;
266e8de3701SAlexandre Bounine 	int ret, i;
267e8de3701SAlexandre Bounine 
268e8de3701SAlexandre Bounine 	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
269e8de3701SAlexandre Bounine 		return -EFAULT;
270e8de3701SAlexandre Bounine 
271e8de3701SAlexandre Bounine 	if ((maint_io.offset % 4) ||
2724e1016daSAlexandre Bounine 	    (maint_io.length == 0) || (maint_io.length % 4) ||
2734e1016daSAlexandre Bounine 	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
274e8de3701SAlexandre Bounine 		return -EINVAL;
275e8de3701SAlexandre Bounine 
276e8de3701SAlexandre Bounine 	buffer = vmalloc(maint_io.length);
277e8de3701SAlexandre Bounine 	if (buffer == NULL)
278e8de3701SAlexandre Bounine 		return -ENOMEM;
279e8de3701SAlexandre Bounine 	length = maint_io.length/sizeof(u32);
280e8de3701SAlexandre Bounine 	offset = maint_io.offset;
281e8de3701SAlexandre Bounine 
282e8de3701SAlexandre Bounine 	for (i = 0; i < length; i++) {
283e8de3701SAlexandre Bounine 		if (local)
284e8de3701SAlexandre Bounine 			ret = __rio_local_read_config_32(mport,
285e8de3701SAlexandre Bounine 				offset, &buffer[i]);
286e8de3701SAlexandre Bounine 		else
287e8de3701SAlexandre Bounine 			ret = rio_mport_read_config_32(mport, maint_io.rioid,
288e8de3701SAlexandre Bounine 				maint_io.hopcount, offset, &buffer[i]);
289e8de3701SAlexandre Bounine 		if (ret)
290e8de3701SAlexandre Bounine 			goto out;
291e8de3701SAlexandre Bounine 
292e8de3701SAlexandre Bounine 		offset += 4;
293e8de3701SAlexandre Bounine 	}
294e8de3701SAlexandre Bounine 
2954e1016daSAlexandre Bounine 	if (unlikely(copy_to_user((void __user *)(uintptr_t)maint_io.buffer,
2964e1016daSAlexandre Bounine 				   buffer, maint_io.length)))
297e8de3701SAlexandre Bounine 		ret = -EFAULT;
298e8de3701SAlexandre Bounine out:
299e8de3701SAlexandre Bounine 	vfree(buffer);
300e8de3701SAlexandre Bounine 	return ret;
301e8de3701SAlexandre Bounine }
302e8de3701SAlexandre Bounine 
303e8de3701SAlexandre Bounine static int rio_mport_maint_wr(struct mport_cdev_priv *priv, void __user *arg,
304e8de3701SAlexandre Bounine 			      int local)
305e8de3701SAlexandre Bounine {
306e8de3701SAlexandre Bounine 	struct rio_mport *mport = priv->md->mport;
307e8de3701SAlexandre Bounine 	struct rio_mport_maint_io maint_io;
308e8de3701SAlexandre Bounine 	u32 *buffer;
309e8de3701SAlexandre Bounine 	u32 offset;
310e8de3701SAlexandre Bounine 	size_t length;
311e8de3701SAlexandre Bounine 	int ret = -EINVAL, i;
312e8de3701SAlexandre Bounine 
313e8de3701SAlexandre Bounine 	if (unlikely(copy_from_user(&maint_io, arg, sizeof(maint_io))))
314e8de3701SAlexandre Bounine 		return -EFAULT;
315e8de3701SAlexandre Bounine 
316e8de3701SAlexandre Bounine 	if ((maint_io.offset % 4) ||
3174e1016daSAlexandre Bounine 	    (maint_io.length == 0) || (maint_io.length % 4) ||
3184e1016daSAlexandre Bounine 	    (maint_io.length + maint_io.offset) > RIO_MAINT_SPACE_SZ)
319e8de3701SAlexandre Bounine 		return -EINVAL;
320e8de3701SAlexandre Bounine 
321e8de3701SAlexandre Bounine 	buffer = vmalloc(maint_io.length);
322e8de3701SAlexandre Bounine 	if (buffer == NULL)
323e8de3701SAlexandre Bounine 		return -ENOMEM;
324e8de3701SAlexandre Bounine 	length = maint_io.length;
325e8de3701SAlexandre Bounine 
3264e1016daSAlexandre Bounine 	if (unlikely(copy_from_user(buffer,
3274e1016daSAlexandre Bounine 			(void __user *)(uintptr_t)maint_io.buffer, length))) {
328e8de3701SAlexandre Bounine 		ret = -EFAULT;
329e8de3701SAlexandre Bounine 		goto out;
330e8de3701SAlexandre Bounine 	}
331e8de3701SAlexandre Bounine 
332e8de3701SAlexandre Bounine 	offset = maint_io.offset;
333e8de3701SAlexandre Bounine 	length /= sizeof(u32);
334e8de3701SAlexandre Bounine 
335e8de3701SAlexandre Bounine 	for (i = 0; i < length; i++) {
336e8de3701SAlexandre Bounine 		if (local)
337e8de3701SAlexandre Bounine 			ret = __rio_local_write_config_32(mport,
338e8de3701SAlexandre Bounine 							  offset, buffer[i]);
339e8de3701SAlexandre Bounine 		else
340e8de3701SAlexandre Bounine 			ret = rio_mport_write_config_32(mport, maint_io.rioid,
341e8de3701SAlexandre Bounine 							maint_io.hopcount,
342e8de3701SAlexandre Bounine 							offset, buffer[i]);
343e8de3701SAlexandre Bounine 		if (ret)
344e8de3701SAlexandre Bounine 			goto out;
345e8de3701SAlexandre Bounine 
346e8de3701SAlexandre Bounine 		offset += 4;
347e8de3701SAlexandre Bounine 	}
348e8de3701SAlexandre Bounine 
349e8de3701SAlexandre Bounine out:
350e8de3701SAlexandre Bounine 	vfree(buffer);
351e8de3701SAlexandre Bounine 	return ret;
352e8de3701SAlexandre Bounine }
353e8de3701SAlexandre Bounine 
354e8de3701SAlexandre Bounine 
355e8de3701SAlexandre Bounine /*
356e8de3701SAlexandre Bounine  * Inbound/outbound memory mapping functions
357e8de3701SAlexandre Bounine  */
358e8de3701SAlexandre Bounine static int
359e8de3701SAlexandre Bounine rio_mport_create_outbound_mapping(struct mport_dev *md, struct file *filp,
3604e1016daSAlexandre Bounine 				  u16 rioid, u64 raddr, u32 size,
361e8de3701SAlexandre Bounine 				  dma_addr_t *paddr)
362e8de3701SAlexandre Bounine {
363e8de3701SAlexandre Bounine 	struct rio_mport *mport = md->mport;
364e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
365e8de3701SAlexandre Bounine 	int ret;
366e8de3701SAlexandre Bounine 
367e8de3701SAlexandre Bounine 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%x", rioid, raddr, size);
368e8de3701SAlexandre Bounine 
3694e1016daSAlexandre Bounine 	map = kzalloc(sizeof(*map), GFP_KERNEL);
370e8de3701SAlexandre Bounine 	if (map == NULL)
371e8de3701SAlexandre Bounine 		return -ENOMEM;
372e8de3701SAlexandre Bounine 
373e8de3701SAlexandre Bounine 	ret = rio_map_outb_region(mport, rioid, raddr, size, 0, paddr);
374e8de3701SAlexandre Bounine 	if (ret < 0)
375e8de3701SAlexandre Bounine 		goto err_map_outb;
376e8de3701SAlexandre Bounine 
377e8de3701SAlexandre Bounine 	map->dir = MAP_OUTBOUND;
378e8de3701SAlexandre Bounine 	map->rioid = rioid;
379e8de3701SAlexandre Bounine 	map->rio_addr = raddr;
380e8de3701SAlexandre Bounine 	map->size = size;
381e8de3701SAlexandre Bounine 	map->phys_addr = *paddr;
382e8de3701SAlexandre Bounine 	map->filp = filp;
383e8de3701SAlexandre Bounine 	map->md = md;
384e8de3701SAlexandre Bounine 	kref_init(&map->ref);
385e8de3701SAlexandre Bounine 	list_add_tail(&map->node, &md->mappings);
386e8de3701SAlexandre Bounine 	return 0;
387e8de3701SAlexandre Bounine err_map_outb:
388e8de3701SAlexandre Bounine 	kfree(map);
389e8de3701SAlexandre Bounine 	return ret;
390e8de3701SAlexandre Bounine }
391e8de3701SAlexandre Bounine 
392e8de3701SAlexandre Bounine static int
393e8de3701SAlexandre Bounine rio_mport_get_outbound_mapping(struct mport_dev *md, struct file *filp,
3944e1016daSAlexandre Bounine 			       u16 rioid, u64 raddr, u32 size,
395e8de3701SAlexandre Bounine 			       dma_addr_t *paddr)
396e8de3701SAlexandre Bounine {
397e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
398e8de3701SAlexandre Bounine 	int err = -ENOMEM;
399e8de3701SAlexandre Bounine 
400e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
401e8de3701SAlexandre Bounine 	list_for_each_entry(map, &md->mappings, node) {
402e8de3701SAlexandre Bounine 		if (map->dir != MAP_OUTBOUND)
403e8de3701SAlexandre Bounine 			continue;
404e8de3701SAlexandre Bounine 		if (rioid == map->rioid &&
405e8de3701SAlexandre Bounine 		    raddr == map->rio_addr && size == map->size) {
406e8de3701SAlexandre Bounine 			*paddr = map->phys_addr;
407e8de3701SAlexandre Bounine 			err = 0;
408e8de3701SAlexandre Bounine 			break;
409e8de3701SAlexandre Bounine 		} else if (rioid == map->rioid &&
410e8de3701SAlexandre Bounine 			   raddr < (map->rio_addr + map->size - 1) &&
411e8de3701SAlexandre Bounine 			   (raddr + size) > map->rio_addr) {
412e8de3701SAlexandre Bounine 			err = -EBUSY;
413e8de3701SAlexandre Bounine 			break;
414e8de3701SAlexandre Bounine 		}
415e8de3701SAlexandre Bounine 	}
416e8de3701SAlexandre Bounine 
417e8de3701SAlexandre Bounine 	/* If not found, create new */
418e8de3701SAlexandre Bounine 	if (err == -ENOMEM)
419e8de3701SAlexandre Bounine 		err = rio_mport_create_outbound_mapping(md, filp, rioid, raddr,
420e8de3701SAlexandre Bounine 						size, paddr);
421e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
422e8de3701SAlexandre Bounine 	return err;
423e8de3701SAlexandre Bounine }
424e8de3701SAlexandre Bounine 
425e8de3701SAlexandre Bounine static int rio_mport_obw_map(struct file *filp, void __user *arg)
426e8de3701SAlexandre Bounine {
427e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
428e8de3701SAlexandre Bounine 	struct mport_dev *data = priv->md;
429e8de3701SAlexandre Bounine 	struct rio_mmap map;
430e8de3701SAlexandre Bounine 	dma_addr_t paddr;
431e8de3701SAlexandre Bounine 	int ret;
432e8de3701SAlexandre Bounine 
4334e1016daSAlexandre Bounine 	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
434e8de3701SAlexandre Bounine 		return -EFAULT;
435e8de3701SAlexandre Bounine 
436e8de3701SAlexandre Bounine 	rmcd_debug(OBW, "did=%d ra=0x%llx sz=0x%llx",
437e8de3701SAlexandre Bounine 		   map.rioid, map.rio_addr, map.length);
438e8de3701SAlexandre Bounine 
439e8de3701SAlexandre Bounine 	ret = rio_mport_get_outbound_mapping(data, filp, map.rioid,
440e8de3701SAlexandre Bounine 					     map.rio_addr, map.length, &paddr);
441e8de3701SAlexandre Bounine 	if (ret < 0) {
442e8de3701SAlexandre Bounine 		rmcd_error("Failed to set OBW err= %d", ret);
443e8de3701SAlexandre Bounine 		return ret;
444e8de3701SAlexandre Bounine 	}
445e8de3701SAlexandre Bounine 
446e8de3701SAlexandre Bounine 	map.handle = paddr;
447e8de3701SAlexandre Bounine 
4484e1016daSAlexandre Bounine 	if (unlikely(copy_to_user(arg, &map, sizeof(map))))
449e8de3701SAlexandre Bounine 		return -EFAULT;
450e8de3701SAlexandre Bounine 	return 0;
451e8de3701SAlexandre Bounine }
452e8de3701SAlexandre Bounine 
453e8de3701SAlexandre Bounine /*
454e8de3701SAlexandre Bounine  * rio_mport_obw_free() - unmap an OutBound Window from RapidIO address space
455e8de3701SAlexandre Bounine  *
456e8de3701SAlexandre Bounine  * @priv: driver private data
457e8de3701SAlexandre Bounine  * @arg:  buffer handle returned by allocation routine
458e8de3701SAlexandre Bounine  */
459e8de3701SAlexandre Bounine static int rio_mport_obw_free(struct file *filp, void __user *arg)
460e8de3701SAlexandre Bounine {
461e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
462e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
463e8de3701SAlexandre Bounine 	u64 handle;
464e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map, *_map;
465e8de3701SAlexandre Bounine 
466e8de3701SAlexandre Bounine 	if (!md->mport->ops->unmap_outb)
467e8de3701SAlexandre Bounine 		return -EPROTONOSUPPORT;
468e8de3701SAlexandre Bounine 
4694e1016daSAlexandre Bounine 	if (copy_from_user(&handle, arg, sizeof(handle)))
470e8de3701SAlexandre Bounine 		return -EFAULT;
471e8de3701SAlexandre Bounine 
472e8de3701SAlexandre Bounine 	rmcd_debug(OBW, "h=0x%llx", handle);
473e8de3701SAlexandre Bounine 
474e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
475e8de3701SAlexandre Bounine 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
476e8de3701SAlexandre Bounine 		if (map->dir == MAP_OUTBOUND && map->phys_addr == handle) {
477e8de3701SAlexandre Bounine 			if (map->filp == filp) {
478e8de3701SAlexandre Bounine 				rmcd_debug(OBW, "kref_put h=0x%llx", handle);
479e8de3701SAlexandre Bounine 				map->filp = NULL;
480e8de3701SAlexandre Bounine 				kref_put(&map->ref, mport_release_mapping);
481e8de3701SAlexandre Bounine 			}
482e8de3701SAlexandre Bounine 			break;
483e8de3701SAlexandre Bounine 		}
484e8de3701SAlexandre Bounine 	}
485e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
486e8de3701SAlexandre Bounine 
487e8de3701SAlexandre Bounine 	return 0;
488e8de3701SAlexandre Bounine }
489e8de3701SAlexandre Bounine 
490e8de3701SAlexandre Bounine /*
491e8de3701SAlexandre Bounine  * maint_hdid_set() - Set the host Device ID
492e8de3701SAlexandre Bounine  * @priv: driver private data
493e8de3701SAlexandre Bounine  * @arg:	Device Id
494e8de3701SAlexandre Bounine  */
495e8de3701SAlexandre Bounine static int maint_hdid_set(struct mport_cdev_priv *priv, void __user *arg)
496e8de3701SAlexandre Bounine {
497e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
4984e1016daSAlexandre Bounine 	u16 hdid;
499e8de3701SAlexandre Bounine 
5004e1016daSAlexandre Bounine 	if (copy_from_user(&hdid, arg, sizeof(hdid)))
501e8de3701SAlexandre Bounine 		return -EFAULT;
502e8de3701SAlexandre Bounine 
503e8de3701SAlexandre Bounine 	md->mport->host_deviceid = hdid;
504e8de3701SAlexandre Bounine 	md->properties.hdid = hdid;
505e8de3701SAlexandre Bounine 	rio_local_set_device_id(md->mport, hdid);
506e8de3701SAlexandre Bounine 
507e8de3701SAlexandre Bounine 	rmcd_debug(MPORT, "Set host device Id to %d", hdid);
508e8de3701SAlexandre Bounine 
509e8de3701SAlexandre Bounine 	return 0;
510e8de3701SAlexandre Bounine }
511e8de3701SAlexandre Bounine 
512e8de3701SAlexandre Bounine /*
513e8de3701SAlexandre Bounine  * maint_comptag_set() - Set the host Component Tag
514e8de3701SAlexandre Bounine  * @priv: driver private data
515e8de3701SAlexandre Bounine  * @arg:	Component Tag
516e8de3701SAlexandre Bounine  */
517e8de3701SAlexandre Bounine static int maint_comptag_set(struct mport_cdev_priv *priv, void __user *arg)
518e8de3701SAlexandre Bounine {
519e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
5204e1016daSAlexandre Bounine 	u32 comptag;
521e8de3701SAlexandre Bounine 
5224e1016daSAlexandre Bounine 	if (copy_from_user(&comptag, arg, sizeof(comptag)))
523e8de3701SAlexandre Bounine 		return -EFAULT;
524e8de3701SAlexandre Bounine 
525e8de3701SAlexandre Bounine 	rio_local_write_config_32(md->mport, RIO_COMPONENT_TAG_CSR, comptag);
526e8de3701SAlexandre Bounine 
527e8de3701SAlexandre Bounine 	rmcd_debug(MPORT, "Set host Component Tag to %d", comptag);
528e8de3701SAlexandre Bounine 
529e8de3701SAlexandre Bounine 	return 0;
530e8de3701SAlexandre Bounine }
531e8de3701SAlexandre Bounine 
532e8de3701SAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
533e8de3701SAlexandre Bounine 
534e8de3701SAlexandre Bounine struct mport_dma_req {
535bbd876adSIoan Nicu 	struct kref refcount;
536e8de3701SAlexandre Bounine 	struct list_head node;
537e8de3701SAlexandre Bounine 	struct file *filp;
538e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
539e8de3701SAlexandre Bounine 	enum rio_transfer_sync sync;
540e8de3701SAlexandre Bounine 	struct sg_table sgt;
541e8de3701SAlexandre Bounine 	struct page **page_list;
542e8de3701SAlexandre Bounine 	unsigned int nr_pages;
543e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
544e8de3701SAlexandre Bounine 	struct dma_chan *dmach;
545e8de3701SAlexandre Bounine 	enum dma_data_direction dir;
546e8de3701SAlexandre Bounine 	dma_cookie_t cookie;
547e8de3701SAlexandre Bounine 	enum dma_status	status;
548e8de3701SAlexandre Bounine 	struct completion req_comp;
549e8de3701SAlexandre Bounine };
550e8de3701SAlexandre Bounine 
551e8de3701SAlexandre Bounine static void mport_release_def_dma(struct kref *dma_ref)
552e8de3701SAlexandre Bounine {
553e8de3701SAlexandre Bounine 	struct mport_dev *md =
554e8de3701SAlexandre Bounine 			container_of(dma_ref, struct mport_dev, dma_ref);
555e8de3701SAlexandre Bounine 
556e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "DMA_%d", md->dma_chan->chan_id);
557e8de3701SAlexandre Bounine 	rio_release_dma(md->dma_chan);
558e8de3701SAlexandre Bounine 	md->dma_chan = NULL;
559e8de3701SAlexandre Bounine }
560e8de3701SAlexandre Bounine 
561e8de3701SAlexandre Bounine static void mport_release_dma(struct kref *dma_ref)
562e8de3701SAlexandre Bounine {
563e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv =
564e8de3701SAlexandre Bounine 			container_of(dma_ref, struct mport_cdev_priv, dma_ref);
565e8de3701SAlexandre Bounine 
566e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "DMA_%d", priv->dmach->chan_id);
567e8de3701SAlexandre Bounine 	complete(&priv->comp);
568e8de3701SAlexandre Bounine }
569e8de3701SAlexandre Bounine 
570bbd876adSIoan Nicu static void dma_req_free(struct kref *ref)
571e8de3701SAlexandre Bounine {
572bbd876adSIoan Nicu 	struct mport_dma_req *req = container_of(ref, struct mport_dma_req,
573bbd876adSIoan Nicu 			refcount);
574e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = req->priv;
575e8de3701SAlexandre Bounine 	unsigned int i;
576e8de3701SAlexandre Bounine 
577e8de3701SAlexandre Bounine 	dma_unmap_sg(req->dmach->device->dev,
578e8de3701SAlexandre Bounine 		     req->sgt.sgl, req->sgt.nents, req->dir);
579e8de3701SAlexandre Bounine 	sg_free_table(&req->sgt);
580e8de3701SAlexandre Bounine 	if (req->page_list) {
581e8de3701SAlexandre Bounine 		for (i = 0; i < req->nr_pages; i++)
582e8de3701SAlexandre Bounine 			put_page(req->page_list[i]);
583e8de3701SAlexandre Bounine 		kfree(req->page_list);
584e8de3701SAlexandre Bounine 	}
585e8de3701SAlexandre Bounine 
586e8de3701SAlexandre Bounine 	if (req->map) {
587e8de3701SAlexandre Bounine 		mutex_lock(&req->map->md->buf_mutex);
588e8de3701SAlexandre Bounine 		kref_put(&req->map->ref, mport_release_mapping);
589e8de3701SAlexandre Bounine 		mutex_unlock(&req->map->md->buf_mutex);
590e8de3701SAlexandre Bounine 	}
591e8de3701SAlexandre Bounine 
592e8de3701SAlexandre Bounine 	kref_put(&priv->dma_ref, mport_release_dma);
593e8de3701SAlexandre Bounine 
594e8de3701SAlexandre Bounine 	kfree(req);
595e8de3701SAlexandre Bounine }
596e8de3701SAlexandre Bounine 
597e8de3701SAlexandre Bounine static void dma_xfer_callback(void *param)
598e8de3701SAlexandre Bounine {
599e8de3701SAlexandre Bounine 	struct mport_dma_req *req = (struct mport_dma_req *)param;
600e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = req->priv;
601e8de3701SAlexandre Bounine 
602e8de3701SAlexandre Bounine 	req->status = dma_async_is_tx_complete(priv->dmach, req->cookie,
603e8de3701SAlexandre Bounine 					       NULL, NULL);
604e8de3701SAlexandre Bounine 	complete(&req->req_comp);
605bbd876adSIoan Nicu 	kref_put(&req->refcount, dma_req_free);
606e8de3701SAlexandre Bounine }
607e8de3701SAlexandre Bounine 
608e8de3701SAlexandre Bounine /*
609e8de3701SAlexandre Bounine  * prep_dma_xfer() - Configure and send request to DMAengine to prepare DMA
610e8de3701SAlexandre Bounine  *                   transfer object.
611e8de3701SAlexandre Bounine  * Returns pointer to DMA transaction descriptor allocated by DMA driver on
612e8de3701SAlexandre Bounine  * success or ERR_PTR (and/or NULL) if failed. Caller must check returned
613e8de3701SAlexandre Bounine  * non-NULL pointer using IS_ERR macro.
614e8de3701SAlexandre Bounine  */
615e8de3701SAlexandre Bounine static struct dma_async_tx_descriptor
616e8de3701SAlexandre Bounine *prep_dma_xfer(struct dma_chan *chan, struct rio_transfer_io *transfer,
617e8de3701SAlexandre Bounine 	struct sg_table *sgt, int nents, enum dma_transfer_direction dir,
618e8de3701SAlexandre Bounine 	enum dma_ctrl_flags flags)
619e8de3701SAlexandre Bounine {
620e8de3701SAlexandre Bounine 	struct rio_dma_data tx_data;
621e8de3701SAlexandre Bounine 
622e8de3701SAlexandre Bounine 	tx_data.sg = sgt->sgl;
623e8de3701SAlexandre Bounine 	tx_data.sg_len = nents;
624e8de3701SAlexandre Bounine 	tx_data.rio_addr_u = 0;
625e8de3701SAlexandre Bounine 	tx_data.rio_addr = transfer->rio_addr;
626e8de3701SAlexandre Bounine 	if (dir == DMA_MEM_TO_DEV) {
627e8de3701SAlexandre Bounine 		switch (transfer->method) {
628e8de3701SAlexandre Bounine 		case RIO_EXCHANGE_NWRITE:
629e8de3701SAlexandre Bounine 			tx_data.wr_type = RDW_ALL_NWRITE;
630e8de3701SAlexandre Bounine 			break;
631e8de3701SAlexandre Bounine 		case RIO_EXCHANGE_NWRITE_R_ALL:
632e8de3701SAlexandre Bounine 			tx_data.wr_type = RDW_ALL_NWRITE_R;
633e8de3701SAlexandre Bounine 			break;
634e8de3701SAlexandre Bounine 		case RIO_EXCHANGE_NWRITE_R:
635e8de3701SAlexandre Bounine 			tx_data.wr_type = RDW_LAST_NWRITE_R;
636e8de3701SAlexandre Bounine 			break;
637e8de3701SAlexandre Bounine 		case RIO_EXCHANGE_DEFAULT:
638e8de3701SAlexandre Bounine 			tx_data.wr_type = RDW_DEFAULT;
639e8de3701SAlexandre Bounine 			break;
640e8de3701SAlexandre Bounine 		default:
641e8de3701SAlexandre Bounine 			return ERR_PTR(-EINVAL);
642e8de3701SAlexandre Bounine 		}
643e8de3701SAlexandre Bounine 	}
644e8de3701SAlexandre Bounine 
645e8de3701SAlexandre Bounine 	return rio_dma_prep_xfer(chan, transfer->rioid, &tx_data, dir, flags);
646e8de3701SAlexandre Bounine }
647e8de3701SAlexandre Bounine 
648e8de3701SAlexandre Bounine /* Request DMA channel associated with this mport device.
649e8de3701SAlexandre Bounine  * Try to request DMA channel for every new process that opened given
650e8de3701SAlexandre Bounine  * mport. If a new DMA channel is not available use default channel
651e8de3701SAlexandre Bounine  * which is the first DMA channel opened on mport device.
652e8de3701SAlexandre Bounine  */
653e8de3701SAlexandre Bounine static int get_dma_channel(struct mport_cdev_priv *priv)
654e8de3701SAlexandre Bounine {
655e8de3701SAlexandre Bounine 	mutex_lock(&priv->dma_lock);
656e8de3701SAlexandre Bounine 	if (!priv->dmach) {
657e8de3701SAlexandre Bounine 		priv->dmach = rio_request_mport_dma(priv->md->mport);
658e8de3701SAlexandre Bounine 		if (!priv->dmach) {
659e8de3701SAlexandre Bounine 			/* Use default DMA channel if available */
660e8de3701SAlexandre Bounine 			if (priv->md->dma_chan) {
661e8de3701SAlexandre Bounine 				priv->dmach = priv->md->dma_chan;
662e8de3701SAlexandre Bounine 				kref_get(&priv->md->dma_ref);
663e8de3701SAlexandre Bounine 			} else {
664e8de3701SAlexandre Bounine 				rmcd_error("Failed to get DMA channel");
665e8de3701SAlexandre Bounine 				mutex_unlock(&priv->dma_lock);
666e8de3701SAlexandre Bounine 				return -ENODEV;
667e8de3701SAlexandre Bounine 			}
668e8de3701SAlexandre Bounine 		} else if (!priv->md->dma_chan) {
669e8de3701SAlexandre Bounine 			/* Register default DMA channel if we do not have one */
670e8de3701SAlexandre Bounine 			priv->md->dma_chan = priv->dmach;
671e8de3701SAlexandre Bounine 			kref_init(&priv->md->dma_ref);
672e8de3701SAlexandre Bounine 			rmcd_debug(DMA, "Register DMA_chan %d as default",
673e8de3701SAlexandre Bounine 				   priv->dmach->chan_id);
674e8de3701SAlexandre Bounine 		}
675e8de3701SAlexandre Bounine 
676e8de3701SAlexandre Bounine 		kref_init(&priv->dma_ref);
677e8de3701SAlexandre Bounine 		init_completion(&priv->comp);
678e8de3701SAlexandre Bounine 	}
679e8de3701SAlexandre Bounine 
680e8de3701SAlexandre Bounine 	kref_get(&priv->dma_ref);
681e8de3701SAlexandre Bounine 	mutex_unlock(&priv->dma_lock);
682e8de3701SAlexandre Bounine 	return 0;
683e8de3701SAlexandre Bounine }
684e8de3701SAlexandre Bounine 
685e8de3701SAlexandre Bounine static void put_dma_channel(struct mport_cdev_priv *priv)
686e8de3701SAlexandre Bounine {
687e8de3701SAlexandre Bounine 	kref_put(&priv->dma_ref, mport_release_dma);
688e8de3701SAlexandre Bounine }
689e8de3701SAlexandre Bounine 
690e8de3701SAlexandre Bounine /*
691e8de3701SAlexandre Bounine  * DMA transfer functions
692e8de3701SAlexandre Bounine  */
693e8de3701SAlexandre Bounine static int do_dma_request(struct mport_dma_req *req,
694e8de3701SAlexandre Bounine 			  struct rio_transfer_io *xfer,
695e8de3701SAlexandre Bounine 			  enum rio_transfer_sync sync, int nents)
696e8de3701SAlexandre Bounine {
697e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
698e8de3701SAlexandre Bounine 	struct sg_table *sgt;
699e8de3701SAlexandre Bounine 	struct dma_chan *chan;
700e8de3701SAlexandre Bounine 	struct dma_async_tx_descriptor *tx;
701e8de3701SAlexandre Bounine 	dma_cookie_t cookie;
702e8de3701SAlexandre Bounine 	unsigned long tmo = msecs_to_jiffies(dma_timeout);
703e8de3701SAlexandre Bounine 	enum dma_transfer_direction dir;
704e8de3701SAlexandre Bounine 	long wret;
705e8de3701SAlexandre Bounine 	int ret = 0;
706e8de3701SAlexandre Bounine 
707e8de3701SAlexandre Bounine 	priv = req->priv;
708e8de3701SAlexandre Bounine 	sgt = &req->sgt;
709e8de3701SAlexandre Bounine 
710e8de3701SAlexandre Bounine 	chan = priv->dmach;
711e8de3701SAlexandre Bounine 	dir = (req->dir == DMA_FROM_DEVICE) ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV;
712e8de3701SAlexandre Bounine 
713e8de3701SAlexandre Bounine 	rmcd_debug(DMA, "%s(%d) uses %s for DMA_%s",
714e8de3701SAlexandre Bounine 		   current->comm, task_pid_nr(current),
715e8de3701SAlexandre Bounine 		   dev_name(&chan->dev->device),
716e8de3701SAlexandre Bounine 		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE");
717e8de3701SAlexandre Bounine 
718e8de3701SAlexandre Bounine 	/* Initialize DMA transaction request */
719e8de3701SAlexandre Bounine 	tx = prep_dma_xfer(chan, xfer, sgt, nents, dir,
720e8de3701SAlexandre Bounine 			   DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
721e8de3701SAlexandre Bounine 
722e8de3701SAlexandre Bounine 	if (!tx) {
723e8de3701SAlexandre Bounine 		rmcd_debug(DMA, "prep error for %s A:0x%llx L:0x%llx",
724e8de3701SAlexandre Bounine 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
725e8de3701SAlexandre Bounine 			xfer->rio_addr, xfer->length);
726e8de3701SAlexandre Bounine 		ret = -EIO;
727e8de3701SAlexandre Bounine 		goto err_out;
728e8de3701SAlexandre Bounine 	} else if (IS_ERR(tx)) {
729e8de3701SAlexandre Bounine 		ret = PTR_ERR(tx);
730e8de3701SAlexandre Bounine 		rmcd_debug(DMA, "prep error %d for %s A:0x%llx L:0x%llx", ret,
731e8de3701SAlexandre Bounine 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
732e8de3701SAlexandre Bounine 			xfer->rio_addr, xfer->length);
733e8de3701SAlexandre Bounine 		goto err_out;
734e8de3701SAlexandre Bounine 	}
735e8de3701SAlexandre Bounine 
736e8de3701SAlexandre Bounine 	tx->callback = dma_xfer_callback;
737e8de3701SAlexandre Bounine 	tx->callback_param = req;
738e8de3701SAlexandre Bounine 
739e8de3701SAlexandre Bounine 	req->status = DMA_IN_PROGRESS;
740bbd876adSIoan Nicu 	kref_get(&req->refcount);
741e8de3701SAlexandre Bounine 
742e8de3701SAlexandre Bounine 	cookie = dmaengine_submit(tx);
743e8de3701SAlexandre Bounine 	req->cookie = cookie;
744e8de3701SAlexandre Bounine 
745e8de3701SAlexandre Bounine 	rmcd_debug(DMA, "pid=%d DMA_%s tx_cookie = %d", task_pid_nr(current),
746e8de3701SAlexandre Bounine 		   (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
747e8de3701SAlexandre Bounine 
748e8de3701SAlexandre Bounine 	if (dma_submit_error(cookie)) {
749e8de3701SAlexandre Bounine 		rmcd_error("submit err=%d (addr:0x%llx len:0x%llx)",
750e8de3701SAlexandre Bounine 			   cookie, xfer->rio_addr, xfer->length);
751bbd876adSIoan Nicu 		kref_put(&req->refcount, dma_req_free);
752e8de3701SAlexandre Bounine 		ret = -EIO;
753e8de3701SAlexandre Bounine 		goto err_out;
754e8de3701SAlexandre Bounine 	}
755e8de3701SAlexandre Bounine 
756e8de3701SAlexandre Bounine 	dma_async_issue_pending(chan);
757e8de3701SAlexandre Bounine 
758e8de3701SAlexandre Bounine 	if (sync == RIO_TRANSFER_ASYNC) {
759e8de3701SAlexandre Bounine 		spin_lock(&priv->req_lock);
760e8de3701SAlexandre Bounine 		list_add_tail(&req->node, &priv->async_list);
761e8de3701SAlexandre Bounine 		spin_unlock(&priv->req_lock);
762e8de3701SAlexandre Bounine 		return cookie;
763e8de3701SAlexandre Bounine 	} else if (sync == RIO_TRANSFER_FAF)
764e8de3701SAlexandre Bounine 		return 0;
765e8de3701SAlexandre Bounine 
766e8de3701SAlexandre Bounine 	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
767e8de3701SAlexandre Bounine 
768e8de3701SAlexandre Bounine 	if (wret == 0) {
769e8de3701SAlexandre Bounine 		/* Timeout on wait occurred */
770e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) timed out waiting for DMA_%s %d",
771e8de3701SAlexandre Bounine 		       current->comm, task_pid_nr(current),
772e8de3701SAlexandre Bounine 		       (dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
773e8de3701SAlexandre Bounine 		return -ETIMEDOUT;
774e8de3701SAlexandre Bounine 	} else if (wret == -ERESTARTSYS) {
775e8de3701SAlexandre Bounine 		/* Wait_for_completion was interrupted by a signal but DMA may
776e8de3701SAlexandre Bounine 		 * be in progress
777e8de3701SAlexandre Bounine 		 */
778e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) wait for DMA_%s %d was interrupted",
779e8de3701SAlexandre Bounine 			current->comm, task_pid_nr(current),
780e8de3701SAlexandre Bounine 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE", cookie);
781e8de3701SAlexandre Bounine 		return -EINTR;
782e8de3701SAlexandre Bounine 	}
783e8de3701SAlexandre Bounine 
784e8de3701SAlexandre Bounine 	if (req->status != DMA_COMPLETE) {
785e8de3701SAlexandre Bounine 		/* DMA transaction completion was signaled with error */
786e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) DMA_%s %d completed with status %d (ret=%d)",
787e8de3701SAlexandre Bounine 			current->comm, task_pid_nr(current),
788e8de3701SAlexandre Bounine 			(dir == DMA_DEV_TO_MEM)?"READ":"WRITE",
789e8de3701SAlexandre Bounine 			cookie, req->status, ret);
790e8de3701SAlexandre Bounine 		ret = -EIO;
791e8de3701SAlexandre Bounine 	}
792e8de3701SAlexandre Bounine 
793e8de3701SAlexandre Bounine err_out:
794e8de3701SAlexandre Bounine 	return ret;
795e8de3701SAlexandre Bounine }
796e8de3701SAlexandre Bounine 
797e8de3701SAlexandre Bounine /*
798e8de3701SAlexandre Bounine  * rio_dma_transfer() - Perform RapidIO DMA data transfer to/from
799e8de3701SAlexandre Bounine  *                      the remote RapidIO device
800e8de3701SAlexandre Bounine  * @filp: file pointer associated with the call
801e8de3701SAlexandre Bounine  * @transfer_mode: DMA transfer mode
802e8de3701SAlexandre Bounine  * @sync: synchronization mode
803e8de3701SAlexandre Bounine  * @dir: DMA transfer direction (DMA_MEM_TO_DEV = write OR
804e8de3701SAlexandre Bounine  *                               DMA_DEV_TO_MEM = read)
805e8de3701SAlexandre Bounine  * @xfer: data transfer descriptor structure
806e8de3701SAlexandre Bounine  */
807e8de3701SAlexandre Bounine static int
8084e1016daSAlexandre Bounine rio_dma_transfer(struct file *filp, u32 transfer_mode,
809e8de3701SAlexandre Bounine 		 enum rio_transfer_sync sync, enum dma_data_direction dir,
810e8de3701SAlexandre Bounine 		 struct rio_transfer_io *xfer)
811e8de3701SAlexandre Bounine {
812e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
813e8de3701SAlexandre Bounine 	unsigned long nr_pages = 0;
814e8de3701SAlexandre Bounine 	struct page **page_list = NULL;
815e8de3701SAlexandre Bounine 	struct mport_dma_req *req;
816e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
817e8de3701SAlexandre Bounine 	struct dma_chan *chan;
818e8de3701SAlexandre Bounine 	int i, ret;
819e8de3701SAlexandre Bounine 	int nents;
820e8de3701SAlexandre Bounine 
821e8de3701SAlexandre Bounine 	if (xfer->length == 0)
822e8de3701SAlexandre Bounine 		return -EINVAL;
823e8de3701SAlexandre Bounine 	req = kzalloc(sizeof(*req), GFP_KERNEL);
824e8de3701SAlexandre Bounine 	if (!req)
825e8de3701SAlexandre Bounine 		return -ENOMEM;
826e8de3701SAlexandre Bounine 
827e8de3701SAlexandre Bounine 	ret = get_dma_channel(priv);
828e8de3701SAlexandre Bounine 	if (ret) {
829e8de3701SAlexandre Bounine 		kfree(req);
830e8de3701SAlexandre Bounine 		return ret;
831e8de3701SAlexandre Bounine 	}
832c5157b76SIoan Nicu 	chan = priv->dmach;
833c5157b76SIoan Nicu 
834c5157b76SIoan Nicu 	kref_init(&req->refcount);
835c5157b76SIoan Nicu 	init_completion(&req->req_comp);
836c5157b76SIoan Nicu 	req->dir = dir;
837c5157b76SIoan Nicu 	req->filp = filp;
838c5157b76SIoan Nicu 	req->priv = priv;
839c5157b76SIoan Nicu 	req->dmach = chan;
840c5157b76SIoan Nicu 	req->sync = sync;
841e8de3701SAlexandre Bounine 
842e8de3701SAlexandre Bounine 	/*
843e8de3701SAlexandre Bounine 	 * If parameter loc_addr != NULL, we are transferring data from/to
844e8de3701SAlexandre Bounine 	 * data buffer allocated in user-space: lock in memory user-space
845e8de3701SAlexandre Bounine 	 * buffer pages and build an SG table for DMA transfer request
846e8de3701SAlexandre Bounine 	 *
847e8de3701SAlexandre Bounine 	 * Otherwise (loc_addr == NULL) contiguous kernel-space buffer is
848e8de3701SAlexandre Bounine 	 * used for DMA data transfers: build single entry SG table using
849e8de3701SAlexandre Bounine 	 * offset within the internal buffer specified by handle parameter.
850e8de3701SAlexandre Bounine 	 */
851e8de3701SAlexandre Bounine 	if (xfer->loc_addr) {
852c4860ad6STvrtko Ursulin 		unsigned int offset;
853e8de3701SAlexandre Bounine 		long pinned;
854e8de3701SAlexandre Bounine 
855c4860ad6STvrtko Ursulin 		offset = lower_32_bits(offset_in_page(xfer->loc_addr));
856e8de3701SAlexandre Bounine 		nr_pages = PAGE_ALIGN(xfer->length + offset) >> PAGE_SHIFT;
857e8de3701SAlexandre Bounine 
858e8de3701SAlexandre Bounine 		page_list = kmalloc_array(nr_pages,
859e8de3701SAlexandre Bounine 					  sizeof(*page_list), GFP_KERNEL);
860e8de3701SAlexandre Bounine 		if (page_list == NULL) {
861e8de3701SAlexandre Bounine 			ret = -ENOMEM;
862e8de3701SAlexandre Bounine 			goto err_req;
863e8de3701SAlexandre Bounine 		}
864e8de3701SAlexandre Bounine 
8650ca36a6bSAl Viro 		pinned = get_user_pages_fast(
866e8de3701SAlexandre Bounine 				(unsigned long)xfer->loc_addr & PAGE_MASK,
86773b0140bSIra Weiny 				nr_pages,
86873b0140bSIra Weiny 				dir == DMA_FROM_DEVICE ? FOLL_WRITE : 0,
86973b0140bSIra Weiny 				page_list);
870e8de3701SAlexandre Bounine 
871e8de3701SAlexandre Bounine 		if (pinned != nr_pages) {
872e8de3701SAlexandre Bounine 			if (pinned < 0) {
873369f2679SLorenzo Stoakes 				rmcd_error("get_user_pages_unlocked err=%ld",
874369f2679SLorenzo Stoakes 					   pinned);
875e8de3701SAlexandre Bounine 				nr_pages = 0;
876e8de3701SAlexandre Bounine 			} else
877e8de3701SAlexandre Bounine 				rmcd_error("pinned %ld out of %ld pages",
878e8de3701SAlexandre Bounine 					   pinned, nr_pages);
879e8de3701SAlexandre Bounine 			ret = -EFAULT;
880e8de3701SAlexandre Bounine 			goto err_pg;
881e8de3701SAlexandre Bounine 		}
882e8de3701SAlexandre Bounine 
883e8de3701SAlexandre Bounine 		ret = sg_alloc_table_from_pages(&req->sgt, page_list, nr_pages,
884e8de3701SAlexandre Bounine 					offset, xfer->length, GFP_KERNEL);
885e8de3701SAlexandre Bounine 		if (ret) {
886e8de3701SAlexandre Bounine 			rmcd_error("sg_alloc_table failed with err=%d", ret);
887e8de3701SAlexandre Bounine 			goto err_pg;
888e8de3701SAlexandre Bounine 		}
889e8de3701SAlexandre Bounine 
890e8de3701SAlexandre Bounine 		req->page_list = page_list;
891e8de3701SAlexandre Bounine 		req->nr_pages = nr_pages;
892e8de3701SAlexandre Bounine 	} else {
893e8de3701SAlexandre Bounine 		dma_addr_t baddr;
894e8de3701SAlexandre Bounine 		struct rio_mport_mapping *map;
895e8de3701SAlexandre Bounine 
896e8de3701SAlexandre Bounine 		baddr = (dma_addr_t)xfer->handle;
897e8de3701SAlexandre Bounine 
898e8de3701SAlexandre Bounine 		mutex_lock(&md->buf_mutex);
899e8de3701SAlexandre Bounine 		list_for_each_entry(map, &md->mappings, node) {
900e8de3701SAlexandre Bounine 			if (baddr >= map->phys_addr &&
901e8de3701SAlexandre Bounine 			    baddr < (map->phys_addr + map->size)) {
902e8de3701SAlexandre Bounine 				kref_get(&map->ref);
903e8de3701SAlexandre Bounine 				req->map = map;
904e8de3701SAlexandre Bounine 				break;
905e8de3701SAlexandre Bounine 			}
906e8de3701SAlexandre Bounine 		}
907e8de3701SAlexandre Bounine 		mutex_unlock(&md->buf_mutex);
908e8de3701SAlexandre Bounine 
909e8de3701SAlexandre Bounine 		if (req->map == NULL) {
910e8de3701SAlexandre Bounine 			ret = -ENOMEM;
911e8de3701SAlexandre Bounine 			goto err_req;
912e8de3701SAlexandre Bounine 		}
913e8de3701SAlexandre Bounine 
914e8de3701SAlexandre Bounine 		if (xfer->length + xfer->offset > map->size) {
915e8de3701SAlexandre Bounine 			ret = -EINVAL;
916e8de3701SAlexandre Bounine 			goto err_req;
917e8de3701SAlexandre Bounine 		}
918e8de3701SAlexandre Bounine 
919e8de3701SAlexandre Bounine 		ret = sg_alloc_table(&req->sgt, 1, GFP_KERNEL);
920e8de3701SAlexandre Bounine 		if (unlikely(ret)) {
921e8de3701SAlexandre Bounine 			rmcd_error("sg_alloc_table failed for internal buf");
922e8de3701SAlexandre Bounine 			goto err_req;
923e8de3701SAlexandre Bounine 		}
924e8de3701SAlexandre Bounine 
925e8de3701SAlexandre Bounine 		sg_set_buf(req->sgt.sgl,
926e8de3701SAlexandre Bounine 			   map->virt_addr + (baddr - map->phys_addr) +
927e8de3701SAlexandre Bounine 				xfer->offset, xfer->length);
928e8de3701SAlexandre Bounine 	}
929e8de3701SAlexandre Bounine 
930e8de3701SAlexandre Bounine 	nents = dma_map_sg(chan->device->dev,
931e8de3701SAlexandre Bounine 			   req->sgt.sgl, req->sgt.nents, dir);
932c46d90cdSChristophe JAILLET 	if (nents == 0) {
933e8de3701SAlexandre Bounine 		rmcd_error("Failed to map SG list");
934b1402dcbSChristophe JAILLET 		ret = -EFAULT;
935b1402dcbSChristophe JAILLET 		goto err_pg;
936e8de3701SAlexandre Bounine 	}
937e8de3701SAlexandre Bounine 
938e8de3701SAlexandre Bounine 	ret = do_dma_request(req, xfer, sync, nents);
939e8de3701SAlexandre Bounine 
940e8de3701SAlexandre Bounine 	if (ret >= 0) {
941bbd876adSIoan Nicu 		if (sync == RIO_TRANSFER_ASYNC)
942e8de3701SAlexandre Bounine 			return ret; /* return ASYNC cookie */
943bbd876adSIoan Nicu 	} else {
944e8de3701SAlexandre Bounine 		rmcd_debug(DMA, "do_dma_request failed with err=%d", ret);
945bbd876adSIoan Nicu 	}
946bbd876adSIoan Nicu 
947e8de3701SAlexandre Bounine err_pg:
948bbd876adSIoan Nicu 	if (!req->page_list) {
949e8de3701SAlexandre Bounine 		for (i = 0; i < nr_pages; i++)
950e8de3701SAlexandre Bounine 			put_page(page_list[i]);
951e8de3701SAlexandre Bounine 		kfree(page_list);
952e8de3701SAlexandre Bounine 	}
953e8de3701SAlexandre Bounine err_req:
954bbd876adSIoan Nicu 	kref_put(&req->refcount, dma_req_free);
955e8de3701SAlexandre Bounine 	return ret;
956e8de3701SAlexandre Bounine }
957e8de3701SAlexandre Bounine 
958e8de3701SAlexandre Bounine static int rio_mport_transfer_ioctl(struct file *filp, void __user *arg)
959e8de3701SAlexandre Bounine {
960e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
961e8de3701SAlexandre Bounine 	struct rio_transaction transaction;
962e8de3701SAlexandre Bounine 	struct rio_transfer_io *transfer;
963e8de3701SAlexandre Bounine 	enum dma_data_direction dir;
964e8de3701SAlexandre Bounine 	int i, ret = 0;
965e8de3701SAlexandre Bounine 
966e8de3701SAlexandre Bounine 	if (unlikely(copy_from_user(&transaction, arg, sizeof(transaction))))
967e8de3701SAlexandre Bounine 		return -EFAULT;
968e8de3701SAlexandre Bounine 
9694e1016daSAlexandre Bounine 	if (transaction.count != 1) /* only single transfer for now */
970e8de3701SAlexandre Bounine 		return -EINVAL;
971e8de3701SAlexandre Bounine 
972e8de3701SAlexandre Bounine 	if ((transaction.transfer_mode &
973e8de3701SAlexandre Bounine 	     priv->md->properties.transfer_mode) == 0)
974e8de3701SAlexandre Bounine 		return -ENODEV;
975e8de3701SAlexandre Bounine 
97642bc47b3SKees Cook 	transfer = vmalloc(array_size(sizeof(*transfer), transaction.count));
977e8de3701SAlexandre Bounine 	if (!transfer)
978e8de3701SAlexandre Bounine 		return -ENOMEM;
979e8de3701SAlexandre Bounine 
9804e1016daSAlexandre Bounine 	if (unlikely(copy_from_user(transfer,
9814e1016daSAlexandre Bounine 				    (void __user *)(uintptr_t)transaction.block,
9824e1016daSAlexandre Bounine 				    transaction.count * sizeof(*transfer)))) {
983e8de3701SAlexandre Bounine 		ret = -EFAULT;
984e8de3701SAlexandre Bounine 		goto out_free;
985e8de3701SAlexandre Bounine 	}
986e8de3701SAlexandre Bounine 
987e8de3701SAlexandre Bounine 	dir = (transaction.dir == RIO_TRANSFER_DIR_READ) ?
988e8de3701SAlexandre Bounine 					DMA_FROM_DEVICE : DMA_TO_DEVICE;
989e8de3701SAlexandre Bounine 	for (i = 0; i < transaction.count && ret == 0; i++)
990e8de3701SAlexandre Bounine 		ret = rio_dma_transfer(filp, transaction.transfer_mode,
991e8de3701SAlexandre Bounine 			transaction.sync, dir, &transfer[i]);
992e8de3701SAlexandre Bounine 
9934e1016daSAlexandre Bounine 	if (unlikely(copy_to_user((void __user *)(uintptr_t)transaction.block,
9944e1016daSAlexandre Bounine 				  transfer,
9954e1016daSAlexandre Bounine 				  transaction.count * sizeof(*transfer))))
996e8de3701SAlexandre Bounine 		ret = -EFAULT;
997e8de3701SAlexandre Bounine 
998e8de3701SAlexandre Bounine out_free:
999e8de3701SAlexandre Bounine 	vfree(transfer);
1000e8de3701SAlexandre Bounine 
1001e8de3701SAlexandre Bounine 	return ret;
1002e8de3701SAlexandre Bounine }
1003e8de3701SAlexandre Bounine 
1004e8de3701SAlexandre Bounine static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
1005e8de3701SAlexandre Bounine {
1006e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
1007e8de3701SAlexandre Bounine 	struct rio_async_tx_wait w_param;
1008e8de3701SAlexandre Bounine 	struct mport_dma_req *req;
1009e8de3701SAlexandre Bounine 	dma_cookie_t cookie;
1010e8de3701SAlexandre Bounine 	unsigned long tmo;
1011e8de3701SAlexandre Bounine 	long wret;
1012e8de3701SAlexandre Bounine 	int found = 0;
1013e8de3701SAlexandre Bounine 	int ret;
1014e8de3701SAlexandre Bounine 
1015e8de3701SAlexandre Bounine 	priv = (struct mport_cdev_priv *)filp->private_data;
1016e8de3701SAlexandre Bounine 
1017e8de3701SAlexandre Bounine 	if (unlikely(copy_from_user(&w_param, arg, sizeof(w_param))))
1018e8de3701SAlexandre Bounine 		return -EFAULT;
1019e8de3701SAlexandre Bounine 
1020e8de3701SAlexandre Bounine 	cookie = w_param.token;
1021e8de3701SAlexandre Bounine 	if (w_param.timeout)
1022e8de3701SAlexandre Bounine 		tmo = msecs_to_jiffies(w_param.timeout);
1023e8de3701SAlexandre Bounine 	else /* Use default DMA timeout */
1024e8de3701SAlexandre Bounine 		tmo = msecs_to_jiffies(dma_timeout);
1025e8de3701SAlexandre Bounine 
1026e8de3701SAlexandre Bounine 	spin_lock(&priv->req_lock);
1027e8de3701SAlexandre Bounine 	list_for_each_entry(req, &priv->async_list, node) {
1028e8de3701SAlexandre Bounine 		if (req->cookie == cookie) {
1029e8de3701SAlexandre Bounine 			list_del(&req->node);
1030e8de3701SAlexandre Bounine 			found = 1;
1031e8de3701SAlexandre Bounine 			break;
1032e8de3701SAlexandre Bounine 		}
1033e8de3701SAlexandre Bounine 	}
1034e8de3701SAlexandre Bounine 	spin_unlock(&priv->req_lock);
1035e8de3701SAlexandre Bounine 
1036e8de3701SAlexandre Bounine 	if (!found)
1037e8de3701SAlexandre Bounine 		return -EAGAIN;
1038e8de3701SAlexandre Bounine 
1039e8de3701SAlexandre Bounine 	wret = wait_for_completion_interruptible_timeout(&req->req_comp, tmo);
1040e8de3701SAlexandre Bounine 
1041e8de3701SAlexandre Bounine 	if (wret == 0) {
1042e8de3701SAlexandre Bounine 		/* Timeout on wait occurred */
1043e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) timed out waiting for ASYNC DMA_%s",
1044e8de3701SAlexandre Bounine 		       current->comm, task_pid_nr(current),
1045e8de3701SAlexandre Bounine 		       (req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
1046e8de3701SAlexandre Bounine 		ret = -ETIMEDOUT;
1047e8de3701SAlexandre Bounine 		goto err_tmo;
1048e8de3701SAlexandre Bounine 	} else if (wret == -ERESTARTSYS) {
1049e8de3701SAlexandre Bounine 		/* Wait_for_completion was interrupted by a signal but DMA may
1050e8de3701SAlexandre Bounine 		 * be still in progress
1051e8de3701SAlexandre Bounine 		 */
1052e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) wait for ASYNC DMA_%s was interrupted",
1053e8de3701SAlexandre Bounine 			current->comm, task_pid_nr(current),
1054e8de3701SAlexandre Bounine 			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE");
1055e8de3701SAlexandre Bounine 		ret = -EINTR;
1056e8de3701SAlexandre Bounine 		goto err_tmo;
1057e8de3701SAlexandre Bounine 	}
1058e8de3701SAlexandre Bounine 
1059e8de3701SAlexandre Bounine 	if (req->status != DMA_COMPLETE) {
1060e8de3701SAlexandre Bounine 		/* DMA transaction completion signaled with transfer error */
1061e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) ASYNC DMA_%s completion with status %d",
1062e8de3701SAlexandre Bounine 			current->comm, task_pid_nr(current),
1063e8de3701SAlexandre Bounine 			(req->dir == DMA_FROM_DEVICE)?"READ":"WRITE",
1064e8de3701SAlexandre Bounine 			req->status);
1065e8de3701SAlexandre Bounine 		ret = -EIO;
1066e8de3701SAlexandre Bounine 	} else
1067e8de3701SAlexandre Bounine 		ret = 0;
1068e8de3701SAlexandre Bounine 
1069e8de3701SAlexandre Bounine 	if (req->status != DMA_IN_PROGRESS && req->status != DMA_PAUSED)
1070bbd876adSIoan Nicu 		kref_put(&req->refcount, dma_req_free);
1071e8de3701SAlexandre Bounine 
1072e8de3701SAlexandre Bounine 	return ret;
1073e8de3701SAlexandre Bounine 
1074e8de3701SAlexandre Bounine err_tmo:
1075e8de3701SAlexandre Bounine 	/* Return request back into async queue */
1076e8de3701SAlexandre Bounine 	spin_lock(&priv->req_lock);
1077e8de3701SAlexandre Bounine 	list_add_tail(&req->node, &priv->async_list);
1078e8de3701SAlexandre Bounine 	spin_unlock(&priv->req_lock);
1079e8de3701SAlexandre Bounine 	return ret;
1080e8de3701SAlexandre Bounine }
1081e8de3701SAlexandre Bounine 
1082e8de3701SAlexandre Bounine static int rio_mport_create_dma_mapping(struct mport_dev *md, struct file *filp,
10834e1016daSAlexandre Bounine 			u64 size, struct rio_mport_mapping **mapping)
1084e8de3701SAlexandre Bounine {
1085e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
1086e8de3701SAlexandre Bounine 
10874e1016daSAlexandre Bounine 	map = kzalloc(sizeof(*map), GFP_KERNEL);
1088e8de3701SAlexandre Bounine 	if (map == NULL)
1089e8de3701SAlexandre Bounine 		return -ENOMEM;
1090e8de3701SAlexandre Bounine 
1091e8de3701SAlexandre Bounine 	map->virt_addr = dma_alloc_coherent(md->mport->dev.parent, size,
1092e8de3701SAlexandre Bounine 					    &map->phys_addr, GFP_KERNEL);
1093e8de3701SAlexandre Bounine 	if (map->virt_addr == NULL) {
1094e8de3701SAlexandre Bounine 		kfree(map);
1095e8de3701SAlexandre Bounine 		return -ENOMEM;
1096e8de3701SAlexandre Bounine 	}
1097e8de3701SAlexandre Bounine 
1098e8de3701SAlexandre Bounine 	map->dir = MAP_DMA;
1099e8de3701SAlexandre Bounine 	map->size = size;
1100e8de3701SAlexandre Bounine 	map->filp = filp;
1101e8de3701SAlexandre Bounine 	map->md = md;
1102e8de3701SAlexandre Bounine 	kref_init(&map->ref);
1103e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
1104e8de3701SAlexandre Bounine 	list_add_tail(&map->node, &md->mappings);
1105e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
1106e8de3701SAlexandre Bounine 	*mapping = map;
1107e8de3701SAlexandre Bounine 
1108e8de3701SAlexandre Bounine 	return 0;
1109e8de3701SAlexandre Bounine }
1110e8de3701SAlexandre Bounine 
1111e8de3701SAlexandre Bounine static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1112e8de3701SAlexandre Bounine {
1113e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
1114e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1115e8de3701SAlexandre Bounine 	struct rio_dma_mem map;
1116e8de3701SAlexandre Bounine 	struct rio_mport_mapping *mapping = NULL;
1117e8de3701SAlexandre Bounine 	int ret;
1118e8de3701SAlexandre Bounine 
11194e1016daSAlexandre Bounine 	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1120e8de3701SAlexandre Bounine 		return -EFAULT;
1121e8de3701SAlexandre Bounine 
1122e8de3701SAlexandre Bounine 	ret = rio_mport_create_dma_mapping(md, filp, map.length, &mapping);
1123e8de3701SAlexandre Bounine 	if (ret)
1124e8de3701SAlexandre Bounine 		return ret;
1125e8de3701SAlexandre Bounine 
1126e8de3701SAlexandre Bounine 	map.dma_handle = mapping->phys_addr;
1127e8de3701SAlexandre Bounine 
11284e1016daSAlexandre Bounine 	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1129e8de3701SAlexandre Bounine 		mutex_lock(&md->buf_mutex);
1130e8de3701SAlexandre Bounine 		kref_put(&mapping->ref, mport_release_mapping);
1131e8de3701SAlexandre Bounine 		mutex_unlock(&md->buf_mutex);
1132e8de3701SAlexandre Bounine 		return -EFAULT;
1133e8de3701SAlexandre Bounine 	}
1134e8de3701SAlexandre Bounine 
1135e8de3701SAlexandre Bounine 	return 0;
1136e8de3701SAlexandre Bounine }
1137e8de3701SAlexandre Bounine 
1138e8de3701SAlexandre Bounine static int rio_mport_free_dma(struct file *filp, void __user *arg)
1139e8de3701SAlexandre Bounine {
1140e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
1141e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1142e8de3701SAlexandre Bounine 	u64 handle;
1143e8de3701SAlexandre Bounine 	int ret = -EFAULT;
1144e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map, *_map;
1145e8de3701SAlexandre Bounine 
11464e1016daSAlexandre Bounine 	if (copy_from_user(&handle, arg, sizeof(handle)))
1147e8de3701SAlexandre Bounine 		return -EFAULT;
1148e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "filp=%p", filp);
1149e8de3701SAlexandre Bounine 
1150e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
1151e8de3701SAlexandre Bounine 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
1152e8de3701SAlexandre Bounine 		if (map->dir == MAP_DMA && map->phys_addr == handle &&
1153e8de3701SAlexandre Bounine 		    map->filp == filp) {
1154e8de3701SAlexandre Bounine 			kref_put(&map->ref, mport_release_mapping);
1155e8de3701SAlexandre Bounine 			ret = 0;
1156e8de3701SAlexandre Bounine 			break;
1157e8de3701SAlexandre Bounine 		}
1158e8de3701SAlexandre Bounine 	}
1159e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
1160e8de3701SAlexandre Bounine 
1161e8de3701SAlexandre Bounine 	if (ret == -EFAULT) {
1162e8de3701SAlexandre Bounine 		rmcd_debug(DMA, "ERR no matching mapping");
1163e8de3701SAlexandre Bounine 		return ret;
1164e8de3701SAlexandre Bounine 	}
1165e8de3701SAlexandre Bounine 
1166e8de3701SAlexandre Bounine 	return 0;
1167e8de3701SAlexandre Bounine }
1168e8de3701SAlexandre Bounine #else
1169e8de3701SAlexandre Bounine static int rio_mport_transfer_ioctl(struct file *filp, void *arg)
1170e8de3701SAlexandre Bounine {
1171e8de3701SAlexandre Bounine 	return -ENODEV;
1172e8de3701SAlexandre Bounine }
1173e8de3701SAlexandre Bounine 
1174e8de3701SAlexandre Bounine static int rio_mport_wait_for_async_dma(struct file *filp, void __user *arg)
1175e8de3701SAlexandre Bounine {
1176e8de3701SAlexandre Bounine 	return -ENODEV;
1177e8de3701SAlexandre Bounine }
1178e8de3701SAlexandre Bounine 
1179e8de3701SAlexandre Bounine static int rio_mport_alloc_dma(struct file *filp, void __user *arg)
1180e8de3701SAlexandre Bounine {
1181e8de3701SAlexandre Bounine 	return -ENODEV;
1182e8de3701SAlexandre Bounine }
1183e8de3701SAlexandre Bounine 
1184e8de3701SAlexandre Bounine static int rio_mport_free_dma(struct file *filp, void __user *arg)
1185e8de3701SAlexandre Bounine {
1186e8de3701SAlexandre Bounine 	return -ENODEV;
1187e8de3701SAlexandre Bounine }
1188e8de3701SAlexandre Bounine #endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1189e8de3701SAlexandre Bounine 
1190e8de3701SAlexandre Bounine /*
1191e8de3701SAlexandre Bounine  * Inbound/outbound memory mapping functions
1192e8de3701SAlexandre Bounine  */
1193e8de3701SAlexandre Bounine 
1194e8de3701SAlexandre Bounine static int
1195e8de3701SAlexandre Bounine rio_mport_create_inbound_mapping(struct mport_dev *md, struct file *filp,
11964e1016daSAlexandre Bounine 				u64 raddr, u64 size,
1197e8de3701SAlexandre Bounine 				struct rio_mport_mapping **mapping)
1198e8de3701SAlexandre Bounine {
1199e8de3701SAlexandre Bounine 	struct rio_mport *mport = md->mport;
1200e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
1201e8de3701SAlexandre Bounine 	int ret;
1202e8de3701SAlexandre Bounine 
12034e1016daSAlexandre Bounine 	/* rio_map_inb_region() accepts u32 size */
12044e1016daSAlexandre Bounine 	if (size > 0xffffffff)
12054e1016daSAlexandre Bounine 		return -EINVAL;
12064e1016daSAlexandre Bounine 
12074e1016daSAlexandre Bounine 	map = kzalloc(sizeof(*map), GFP_KERNEL);
1208e8de3701SAlexandre Bounine 	if (map == NULL)
1209e8de3701SAlexandre Bounine 		return -ENOMEM;
1210e8de3701SAlexandre Bounine 
1211e8de3701SAlexandre Bounine 	map->virt_addr = dma_alloc_coherent(mport->dev.parent, size,
1212e8de3701SAlexandre Bounine 					    &map->phys_addr, GFP_KERNEL);
1213e8de3701SAlexandre Bounine 	if (map->virt_addr == NULL) {
1214e8de3701SAlexandre Bounine 		ret = -ENOMEM;
1215e8de3701SAlexandre Bounine 		goto err_dma_alloc;
1216e8de3701SAlexandre Bounine 	}
1217e8de3701SAlexandre Bounine 
1218e8de3701SAlexandre Bounine 	if (raddr == RIO_MAP_ANY_ADDR)
1219e8de3701SAlexandre Bounine 		raddr = map->phys_addr;
12204e1016daSAlexandre Bounine 	ret = rio_map_inb_region(mport, map->phys_addr, raddr, (u32)size, 0);
1221e8de3701SAlexandre Bounine 	if (ret < 0)
1222e8de3701SAlexandre Bounine 		goto err_map_inb;
1223e8de3701SAlexandre Bounine 
1224e8de3701SAlexandre Bounine 	map->dir = MAP_INBOUND;
1225e8de3701SAlexandre Bounine 	map->rio_addr = raddr;
1226e8de3701SAlexandre Bounine 	map->size = size;
1227e8de3701SAlexandre Bounine 	map->filp = filp;
1228e8de3701SAlexandre Bounine 	map->md = md;
1229e8de3701SAlexandre Bounine 	kref_init(&map->ref);
1230e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
1231e8de3701SAlexandre Bounine 	list_add_tail(&map->node, &md->mappings);
1232e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
1233e8de3701SAlexandre Bounine 	*mapping = map;
1234e8de3701SAlexandre Bounine 	return 0;
1235e8de3701SAlexandre Bounine 
1236e8de3701SAlexandre Bounine err_map_inb:
1237e8de3701SAlexandre Bounine 	dma_free_coherent(mport->dev.parent, size,
1238e8de3701SAlexandre Bounine 			  map->virt_addr, map->phys_addr);
1239e8de3701SAlexandre Bounine err_dma_alloc:
1240e8de3701SAlexandre Bounine 	kfree(map);
1241e8de3701SAlexandre Bounine 	return ret;
1242e8de3701SAlexandre Bounine }
1243e8de3701SAlexandre Bounine 
1244e8de3701SAlexandre Bounine static int
1245e8de3701SAlexandre Bounine rio_mport_get_inbound_mapping(struct mport_dev *md, struct file *filp,
12464e1016daSAlexandre Bounine 			      u64 raddr, u64 size,
1247e8de3701SAlexandre Bounine 			      struct rio_mport_mapping **mapping)
1248e8de3701SAlexandre Bounine {
1249e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
1250e8de3701SAlexandre Bounine 	int err = -ENOMEM;
1251e8de3701SAlexandre Bounine 
1252e8de3701SAlexandre Bounine 	if (raddr == RIO_MAP_ANY_ADDR)
1253e8de3701SAlexandre Bounine 		goto get_new;
1254e8de3701SAlexandre Bounine 
1255e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
1256e8de3701SAlexandre Bounine 	list_for_each_entry(map, &md->mappings, node) {
1257e8de3701SAlexandre Bounine 		if (map->dir != MAP_INBOUND)
1258e8de3701SAlexandre Bounine 			continue;
1259e8de3701SAlexandre Bounine 		if (raddr == map->rio_addr && size == map->size) {
1260e8de3701SAlexandre Bounine 			/* allow exact match only */
1261e8de3701SAlexandre Bounine 			*mapping = map;
1262e8de3701SAlexandre Bounine 			err = 0;
1263e8de3701SAlexandre Bounine 			break;
1264e8de3701SAlexandre Bounine 		} else if (raddr < (map->rio_addr + map->size - 1) &&
1265e8de3701SAlexandre Bounine 			   (raddr + size) > map->rio_addr) {
1266e8de3701SAlexandre Bounine 			err = -EBUSY;
1267e8de3701SAlexandre Bounine 			break;
1268e8de3701SAlexandre Bounine 		}
1269e8de3701SAlexandre Bounine 	}
1270e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
1271e8de3701SAlexandre Bounine 
1272e8de3701SAlexandre Bounine 	if (err != -ENOMEM)
1273e8de3701SAlexandre Bounine 		return err;
1274e8de3701SAlexandre Bounine get_new:
1275e8de3701SAlexandre Bounine 	/* not found, create new */
1276e8de3701SAlexandre Bounine 	return rio_mport_create_inbound_mapping(md, filp, raddr, size, mapping);
1277e8de3701SAlexandre Bounine }
1278e8de3701SAlexandre Bounine 
1279e8de3701SAlexandre Bounine static int rio_mport_map_inbound(struct file *filp, void __user *arg)
1280e8de3701SAlexandre Bounine {
1281e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
1282e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1283e8de3701SAlexandre Bounine 	struct rio_mmap map;
1284e8de3701SAlexandre Bounine 	struct rio_mport_mapping *mapping = NULL;
1285e8de3701SAlexandre Bounine 	int ret;
1286e8de3701SAlexandre Bounine 
1287e8de3701SAlexandre Bounine 	if (!md->mport->ops->map_inb)
1288e8de3701SAlexandre Bounine 		return -EPROTONOSUPPORT;
12894e1016daSAlexandre Bounine 	if (unlikely(copy_from_user(&map, arg, sizeof(map))))
1290e8de3701SAlexandre Bounine 		return -EFAULT;
1291e8de3701SAlexandre Bounine 
1292e8de3701SAlexandre Bounine 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
1293e8de3701SAlexandre Bounine 
1294e8de3701SAlexandre Bounine 	ret = rio_mport_get_inbound_mapping(md, filp, map.rio_addr,
1295e8de3701SAlexandre Bounine 					    map.length, &mapping);
1296e8de3701SAlexandre Bounine 	if (ret)
1297e8de3701SAlexandre Bounine 		return ret;
1298e8de3701SAlexandre Bounine 
1299e8de3701SAlexandre Bounine 	map.handle = mapping->phys_addr;
1300e8de3701SAlexandre Bounine 	map.rio_addr = mapping->rio_addr;
1301e8de3701SAlexandre Bounine 
13024e1016daSAlexandre Bounine 	if (unlikely(copy_to_user(arg, &map, sizeof(map)))) {
1303e8de3701SAlexandre Bounine 		/* Delete mapping if it was created by this request */
1304e8de3701SAlexandre Bounine 		if (ret == 0 && mapping->filp == filp) {
1305e8de3701SAlexandre Bounine 			mutex_lock(&md->buf_mutex);
1306e8de3701SAlexandre Bounine 			kref_put(&mapping->ref, mport_release_mapping);
1307e8de3701SAlexandre Bounine 			mutex_unlock(&md->buf_mutex);
1308e8de3701SAlexandre Bounine 		}
1309e8de3701SAlexandre Bounine 		return -EFAULT;
1310e8de3701SAlexandre Bounine 	}
1311e8de3701SAlexandre Bounine 
1312e8de3701SAlexandre Bounine 	return 0;
1313e8de3701SAlexandre Bounine }
1314e8de3701SAlexandre Bounine 
1315e8de3701SAlexandre Bounine /*
1316e8de3701SAlexandre Bounine  * rio_mport_inbound_free() - unmap from RapidIO address space and free
1317e8de3701SAlexandre Bounine  *                    previously allocated inbound DMA coherent buffer
1318e8de3701SAlexandre Bounine  * @priv: driver private data
1319e8de3701SAlexandre Bounine  * @arg:  buffer handle returned by allocation routine
1320e8de3701SAlexandre Bounine  */
1321e8de3701SAlexandre Bounine static int rio_mport_inbound_free(struct file *filp, void __user *arg)
1322e8de3701SAlexandre Bounine {
1323e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
1324e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1325e8de3701SAlexandre Bounine 	u64 handle;
1326e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map, *_map;
1327e8de3701SAlexandre Bounine 
1328e8de3701SAlexandre Bounine 	rmcd_debug(IBW, "%s filp=%p", dev_name(&priv->md->dev), filp);
1329e8de3701SAlexandre Bounine 
1330e8de3701SAlexandre Bounine 	if (!md->mport->ops->unmap_inb)
1331e8de3701SAlexandre Bounine 		return -EPROTONOSUPPORT;
1332e8de3701SAlexandre Bounine 
13334e1016daSAlexandre Bounine 	if (copy_from_user(&handle, arg, sizeof(handle)))
1334e8de3701SAlexandre Bounine 		return -EFAULT;
1335e8de3701SAlexandre Bounine 
1336e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
1337e8de3701SAlexandre Bounine 	list_for_each_entry_safe(map, _map, &md->mappings, node) {
1338e8de3701SAlexandre Bounine 		if (map->dir == MAP_INBOUND && map->phys_addr == handle) {
1339e8de3701SAlexandre Bounine 			if (map->filp == filp) {
1340e8de3701SAlexandre Bounine 				map->filp = NULL;
1341e8de3701SAlexandre Bounine 				kref_put(&map->ref, mport_release_mapping);
1342e8de3701SAlexandre Bounine 			}
1343e8de3701SAlexandre Bounine 			break;
1344e8de3701SAlexandre Bounine 		}
1345e8de3701SAlexandre Bounine 	}
1346e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
1347e8de3701SAlexandre Bounine 
1348e8de3701SAlexandre Bounine 	return 0;
1349e8de3701SAlexandre Bounine }
1350e8de3701SAlexandre Bounine 
1351e8de3701SAlexandre Bounine /*
1352e8de3701SAlexandre Bounine  * maint_port_idx_get() - Get the port index of the mport instance
1353e8de3701SAlexandre Bounine  * @priv: driver private data
1354e8de3701SAlexandre Bounine  * @arg:  port index
1355e8de3701SAlexandre Bounine  */
1356e8de3701SAlexandre Bounine static int maint_port_idx_get(struct mport_cdev_priv *priv, void __user *arg)
1357e8de3701SAlexandre Bounine {
1358e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
13594e1016daSAlexandre Bounine 	u32 port_idx = md->mport->index;
1360e8de3701SAlexandre Bounine 
1361e8de3701SAlexandre Bounine 	rmcd_debug(MPORT, "port_index=%d", port_idx);
1362e8de3701SAlexandre Bounine 
1363e8de3701SAlexandre Bounine 	if (copy_to_user(arg, &port_idx, sizeof(port_idx)))
1364e8de3701SAlexandre Bounine 		return -EFAULT;
1365e8de3701SAlexandre Bounine 
1366e8de3701SAlexandre Bounine 	return 0;
1367e8de3701SAlexandre Bounine }
1368e8de3701SAlexandre Bounine 
1369e8de3701SAlexandre Bounine static int rio_mport_add_event(struct mport_cdev_priv *priv,
1370e8de3701SAlexandre Bounine 			       struct rio_event *event)
1371e8de3701SAlexandre Bounine {
1372e8de3701SAlexandre Bounine 	int overflow;
1373e8de3701SAlexandre Bounine 
1374e8de3701SAlexandre Bounine 	if (!(priv->event_mask & event->header))
1375e8de3701SAlexandre Bounine 		return -EACCES;
1376e8de3701SAlexandre Bounine 
1377e8de3701SAlexandre Bounine 	spin_lock(&priv->fifo_lock);
1378e8de3701SAlexandre Bounine 	overflow = kfifo_avail(&priv->event_fifo) < sizeof(*event)
1379e8de3701SAlexandre Bounine 		|| kfifo_in(&priv->event_fifo, (unsigned char *)event,
1380e8de3701SAlexandre Bounine 			sizeof(*event)) != sizeof(*event);
1381e8de3701SAlexandre Bounine 	spin_unlock(&priv->fifo_lock);
1382e8de3701SAlexandre Bounine 
1383e8de3701SAlexandre Bounine 	wake_up_interruptible(&priv->event_rx_wait);
1384e8de3701SAlexandre Bounine 
1385e8de3701SAlexandre Bounine 	if (overflow) {
1386e8de3701SAlexandre Bounine 		dev_warn(&priv->md->dev, DRV_NAME ": event fifo overflow\n");
1387e8de3701SAlexandre Bounine 		return -EBUSY;
1388e8de3701SAlexandre Bounine 	}
1389e8de3701SAlexandre Bounine 
1390e8de3701SAlexandre Bounine 	return 0;
1391e8de3701SAlexandre Bounine }
1392e8de3701SAlexandre Bounine 
1393e8de3701SAlexandre Bounine static void rio_mport_doorbell_handler(struct rio_mport *mport, void *dev_id,
1394e8de3701SAlexandre Bounine 				       u16 src, u16 dst, u16 info)
1395e8de3701SAlexandre Bounine {
1396e8de3701SAlexandre Bounine 	struct mport_dev *data = dev_id;
1397e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
1398e8de3701SAlexandre Bounine 	struct rio_mport_db_filter *db_filter;
1399e8de3701SAlexandre Bounine 	struct rio_event event;
1400e8de3701SAlexandre Bounine 	int handled;
1401e8de3701SAlexandre Bounine 
1402e8de3701SAlexandre Bounine 	event.header = RIO_DOORBELL;
1403e8de3701SAlexandre Bounine 	event.u.doorbell.rioid = src;
1404e8de3701SAlexandre Bounine 	event.u.doorbell.payload = info;
1405e8de3701SAlexandre Bounine 
1406e8de3701SAlexandre Bounine 	handled = 0;
1407e8de3701SAlexandre Bounine 	spin_lock(&data->db_lock);
1408e8de3701SAlexandre Bounine 	list_for_each_entry(db_filter, &data->doorbells, data_node) {
14094e1016daSAlexandre Bounine 		if (((db_filter->filter.rioid == RIO_INVALID_DESTID ||
1410e8de3701SAlexandre Bounine 		      db_filter->filter.rioid == src)) &&
1411e8de3701SAlexandre Bounine 		      info >= db_filter->filter.low &&
1412e8de3701SAlexandre Bounine 		      info <= db_filter->filter.high) {
1413e8de3701SAlexandre Bounine 			priv = db_filter->priv;
1414e8de3701SAlexandre Bounine 			rio_mport_add_event(priv, &event);
1415e8de3701SAlexandre Bounine 			handled = 1;
1416e8de3701SAlexandre Bounine 		}
1417e8de3701SAlexandre Bounine 	}
1418e8de3701SAlexandre Bounine 	spin_unlock(&data->db_lock);
1419e8de3701SAlexandre Bounine 
1420e8de3701SAlexandre Bounine 	if (!handled)
1421e8de3701SAlexandre Bounine 		dev_warn(&data->dev,
1422e8de3701SAlexandre Bounine 			"%s: spurious DB received from 0x%x, info=0x%04x\n",
1423e8de3701SAlexandre Bounine 			__func__, src, info);
1424e8de3701SAlexandre Bounine }
1425e8de3701SAlexandre Bounine 
1426e8de3701SAlexandre Bounine static int rio_mport_add_db_filter(struct mport_cdev_priv *priv,
1427e8de3701SAlexandre Bounine 				   void __user *arg)
1428e8de3701SAlexandre Bounine {
1429e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1430e8de3701SAlexandre Bounine 	struct rio_mport_db_filter *db_filter;
1431e8de3701SAlexandre Bounine 	struct rio_doorbell_filter filter;
1432e8de3701SAlexandre Bounine 	unsigned long flags;
1433e8de3701SAlexandre Bounine 	int ret;
1434e8de3701SAlexandre Bounine 
1435e8de3701SAlexandre Bounine 	if (copy_from_user(&filter, arg, sizeof(filter)))
1436e8de3701SAlexandre Bounine 		return -EFAULT;
1437e8de3701SAlexandre Bounine 
1438e8de3701SAlexandre Bounine 	if (filter.low > filter.high)
1439e8de3701SAlexandre Bounine 		return -EINVAL;
1440e8de3701SAlexandre Bounine 
1441e8de3701SAlexandre Bounine 	ret = rio_request_inb_dbell(md->mport, md, filter.low, filter.high,
1442e8de3701SAlexandre Bounine 				    rio_mport_doorbell_handler);
1443e8de3701SAlexandre Bounine 	if (ret) {
1444e8de3701SAlexandre Bounine 		rmcd_error("%s failed to register IBDB, err=%d",
1445e8de3701SAlexandre Bounine 			   dev_name(&md->dev), ret);
1446e8de3701SAlexandre Bounine 		return ret;
1447e8de3701SAlexandre Bounine 	}
1448e8de3701SAlexandre Bounine 
1449e8de3701SAlexandre Bounine 	db_filter = kzalloc(sizeof(*db_filter), GFP_KERNEL);
1450e8de3701SAlexandre Bounine 	if (db_filter == NULL) {
1451e8de3701SAlexandre Bounine 		rio_release_inb_dbell(md->mport, filter.low, filter.high);
1452e8de3701SAlexandre Bounine 		return -ENOMEM;
1453e8de3701SAlexandre Bounine 	}
1454e8de3701SAlexandre Bounine 
1455e8de3701SAlexandre Bounine 	db_filter->filter = filter;
1456e8de3701SAlexandre Bounine 	db_filter->priv = priv;
1457e8de3701SAlexandre Bounine 	spin_lock_irqsave(&md->db_lock, flags);
1458e8de3701SAlexandre Bounine 	list_add_tail(&db_filter->priv_node, &priv->db_filters);
1459e8de3701SAlexandre Bounine 	list_add_tail(&db_filter->data_node, &md->doorbells);
1460e8de3701SAlexandre Bounine 	spin_unlock_irqrestore(&md->db_lock, flags);
1461e8de3701SAlexandre Bounine 
1462e8de3701SAlexandre Bounine 	return 0;
1463e8de3701SAlexandre Bounine }
1464e8de3701SAlexandre Bounine 
1465e8de3701SAlexandre Bounine static void rio_mport_delete_db_filter(struct rio_mport_db_filter *db_filter)
1466e8de3701SAlexandre Bounine {
1467e8de3701SAlexandre Bounine 	list_del(&db_filter->data_node);
1468e8de3701SAlexandre Bounine 	list_del(&db_filter->priv_node);
1469e8de3701SAlexandre Bounine 	kfree(db_filter);
1470e8de3701SAlexandre Bounine }
1471e8de3701SAlexandre Bounine 
1472e8de3701SAlexandre Bounine static int rio_mport_remove_db_filter(struct mport_cdev_priv *priv,
1473e8de3701SAlexandre Bounine 				      void __user *arg)
1474e8de3701SAlexandre Bounine {
1475e8de3701SAlexandre Bounine 	struct rio_mport_db_filter *db_filter;
1476e8de3701SAlexandre Bounine 	struct rio_doorbell_filter filter;
1477e8de3701SAlexandre Bounine 	unsigned long flags;
1478e8de3701SAlexandre Bounine 	int ret = -EINVAL;
1479e8de3701SAlexandre Bounine 
1480e8de3701SAlexandre Bounine 	if (copy_from_user(&filter, arg, sizeof(filter)))
1481e8de3701SAlexandre Bounine 		return -EFAULT;
1482e8de3701SAlexandre Bounine 
14834e1016daSAlexandre Bounine 	if (filter.low > filter.high)
14844e1016daSAlexandre Bounine 		return -EINVAL;
14854e1016daSAlexandre Bounine 
1486e8de3701SAlexandre Bounine 	spin_lock_irqsave(&priv->md->db_lock, flags);
1487e8de3701SAlexandre Bounine 	list_for_each_entry(db_filter, &priv->db_filters, priv_node) {
1488e8de3701SAlexandre Bounine 		if (db_filter->filter.rioid == filter.rioid &&
1489e8de3701SAlexandre Bounine 		    db_filter->filter.low == filter.low &&
1490e8de3701SAlexandre Bounine 		    db_filter->filter.high == filter.high) {
1491e8de3701SAlexandre Bounine 			rio_mport_delete_db_filter(db_filter);
1492e8de3701SAlexandre Bounine 			ret = 0;
1493e8de3701SAlexandre Bounine 			break;
1494e8de3701SAlexandre Bounine 		}
1495e8de3701SAlexandre Bounine 	}
1496e8de3701SAlexandre Bounine 	spin_unlock_irqrestore(&priv->md->db_lock, flags);
1497e8de3701SAlexandre Bounine 
1498e8de3701SAlexandre Bounine 	if (!ret)
1499e8de3701SAlexandre Bounine 		rio_release_inb_dbell(priv->md->mport, filter.low, filter.high);
1500e8de3701SAlexandre Bounine 
1501e8de3701SAlexandre Bounine 	return ret;
1502e8de3701SAlexandre Bounine }
1503e8de3701SAlexandre Bounine 
1504e8de3701SAlexandre Bounine static int rio_mport_match_pw(union rio_pw_msg *msg,
1505e8de3701SAlexandre Bounine 			      struct rio_pw_filter *filter)
1506e8de3701SAlexandre Bounine {
1507e8de3701SAlexandre Bounine 	if ((msg->em.comptag & filter->mask) < filter->low ||
1508e8de3701SAlexandre Bounine 		(msg->em.comptag & filter->mask) > filter->high)
1509e8de3701SAlexandre Bounine 		return 0;
1510e8de3701SAlexandre Bounine 	return 1;
1511e8de3701SAlexandre Bounine }
1512e8de3701SAlexandre Bounine 
1513e8de3701SAlexandre Bounine static int rio_mport_pw_handler(struct rio_mport *mport, void *context,
1514e8de3701SAlexandre Bounine 				union rio_pw_msg *msg, int step)
1515e8de3701SAlexandre Bounine {
1516e8de3701SAlexandre Bounine 	struct mport_dev *md = context;
1517e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
1518e8de3701SAlexandre Bounine 	struct rio_mport_pw_filter *pw_filter;
1519e8de3701SAlexandre Bounine 	struct rio_event event;
1520e8de3701SAlexandre Bounine 	int handled;
1521e8de3701SAlexandre Bounine 
1522e8de3701SAlexandre Bounine 	event.header = RIO_PORTWRITE;
1523e8de3701SAlexandre Bounine 	memcpy(event.u.portwrite.payload, msg->raw, RIO_PW_MSG_SIZE);
1524e8de3701SAlexandre Bounine 
1525e8de3701SAlexandre Bounine 	handled = 0;
1526e8de3701SAlexandre Bounine 	spin_lock(&md->pw_lock);
1527e8de3701SAlexandre Bounine 	list_for_each_entry(pw_filter, &md->portwrites, md_node) {
1528e8de3701SAlexandre Bounine 		if (rio_mport_match_pw(msg, &pw_filter->filter)) {
1529e8de3701SAlexandre Bounine 			priv = pw_filter->priv;
1530e8de3701SAlexandre Bounine 			rio_mport_add_event(priv, &event);
1531e8de3701SAlexandre Bounine 			handled = 1;
1532e8de3701SAlexandre Bounine 		}
1533e8de3701SAlexandre Bounine 	}
1534e8de3701SAlexandre Bounine 	spin_unlock(&md->pw_lock);
1535e8de3701SAlexandre Bounine 
1536e8de3701SAlexandre Bounine 	if (!handled) {
1537e8de3701SAlexandre Bounine 		printk_ratelimited(KERN_WARNING DRV_NAME
1538e8de3701SAlexandre Bounine 			": mport%d received spurious PW from 0x%08x\n",
1539e8de3701SAlexandre Bounine 			mport->id, msg->em.comptag);
1540e8de3701SAlexandre Bounine 	}
1541e8de3701SAlexandre Bounine 
1542e8de3701SAlexandre Bounine 	return 0;
1543e8de3701SAlexandre Bounine }
1544e8de3701SAlexandre Bounine 
1545e8de3701SAlexandre Bounine static int rio_mport_add_pw_filter(struct mport_cdev_priv *priv,
1546e8de3701SAlexandre Bounine 				   void __user *arg)
1547e8de3701SAlexandre Bounine {
1548e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1549e8de3701SAlexandre Bounine 	struct rio_mport_pw_filter *pw_filter;
1550e8de3701SAlexandre Bounine 	struct rio_pw_filter filter;
1551e8de3701SAlexandre Bounine 	unsigned long flags;
1552e8de3701SAlexandre Bounine 	int hadd = 0;
1553e8de3701SAlexandre Bounine 
1554e8de3701SAlexandre Bounine 	if (copy_from_user(&filter, arg, sizeof(filter)))
1555e8de3701SAlexandre Bounine 		return -EFAULT;
1556e8de3701SAlexandre Bounine 
1557e8de3701SAlexandre Bounine 	pw_filter = kzalloc(sizeof(*pw_filter), GFP_KERNEL);
1558e8de3701SAlexandre Bounine 	if (pw_filter == NULL)
1559e8de3701SAlexandre Bounine 		return -ENOMEM;
1560e8de3701SAlexandre Bounine 
1561e8de3701SAlexandre Bounine 	pw_filter->filter = filter;
1562e8de3701SAlexandre Bounine 	pw_filter->priv = priv;
1563e8de3701SAlexandre Bounine 	spin_lock_irqsave(&md->pw_lock, flags);
1564e8de3701SAlexandre Bounine 	if (list_empty(&md->portwrites))
1565e8de3701SAlexandre Bounine 		hadd = 1;
1566e8de3701SAlexandre Bounine 	list_add_tail(&pw_filter->priv_node, &priv->pw_filters);
1567e8de3701SAlexandre Bounine 	list_add_tail(&pw_filter->md_node, &md->portwrites);
1568e8de3701SAlexandre Bounine 	spin_unlock_irqrestore(&md->pw_lock, flags);
1569e8de3701SAlexandre Bounine 
1570e8de3701SAlexandre Bounine 	if (hadd) {
1571e8de3701SAlexandre Bounine 		int ret;
1572e8de3701SAlexandre Bounine 
1573e8de3701SAlexandre Bounine 		ret = rio_add_mport_pw_handler(md->mport, md,
1574e8de3701SAlexandre Bounine 					       rio_mport_pw_handler);
1575e8de3701SAlexandre Bounine 		if (ret) {
1576e8de3701SAlexandre Bounine 			dev_err(&md->dev,
1577e8de3701SAlexandre Bounine 				"%s: failed to add IB_PW handler, err=%d\n",
1578e8de3701SAlexandre Bounine 				__func__, ret);
1579e8de3701SAlexandre Bounine 			return ret;
1580e8de3701SAlexandre Bounine 		}
1581e8de3701SAlexandre Bounine 		rio_pw_enable(md->mport, 1);
1582e8de3701SAlexandre Bounine 	}
1583e8de3701SAlexandre Bounine 
1584e8de3701SAlexandre Bounine 	return 0;
1585e8de3701SAlexandre Bounine }
1586e8de3701SAlexandre Bounine 
1587e8de3701SAlexandre Bounine static void rio_mport_delete_pw_filter(struct rio_mport_pw_filter *pw_filter)
1588e8de3701SAlexandre Bounine {
1589e8de3701SAlexandre Bounine 	list_del(&pw_filter->md_node);
1590e8de3701SAlexandre Bounine 	list_del(&pw_filter->priv_node);
1591e8de3701SAlexandre Bounine 	kfree(pw_filter);
1592e8de3701SAlexandre Bounine }
1593e8de3701SAlexandre Bounine 
1594e8de3701SAlexandre Bounine static int rio_mport_match_pw_filter(struct rio_pw_filter *a,
1595e8de3701SAlexandre Bounine 				     struct rio_pw_filter *b)
1596e8de3701SAlexandre Bounine {
1597e8de3701SAlexandre Bounine 	if ((a->mask == b->mask) && (a->low == b->low) && (a->high == b->high))
1598e8de3701SAlexandre Bounine 		return 1;
1599e8de3701SAlexandre Bounine 	return 0;
1600e8de3701SAlexandre Bounine }
1601e8de3701SAlexandre Bounine 
1602e8de3701SAlexandre Bounine static int rio_mport_remove_pw_filter(struct mport_cdev_priv *priv,
1603e8de3701SAlexandre Bounine 				      void __user *arg)
1604e8de3701SAlexandre Bounine {
1605e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1606e8de3701SAlexandre Bounine 	struct rio_mport_pw_filter *pw_filter;
1607e8de3701SAlexandre Bounine 	struct rio_pw_filter filter;
1608e8de3701SAlexandre Bounine 	unsigned long flags;
1609e8de3701SAlexandre Bounine 	int ret = -EINVAL;
1610e8de3701SAlexandre Bounine 	int hdel = 0;
1611e8de3701SAlexandre Bounine 
1612e8de3701SAlexandre Bounine 	if (copy_from_user(&filter, arg, sizeof(filter)))
1613e8de3701SAlexandre Bounine 		return -EFAULT;
1614e8de3701SAlexandre Bounine 
1615e8de3701SAlexandre Bounine 	spin_lock_irqsave(&md->pw_lock, flags);
1616e8de3701SAlexandre Bounine 	list_for_each_entry(pw_filter, &priv->pw_filters, priv_node) {
1617e8de3701SAlexandre Bounine 		if (rio_mport_match_pw_filter(&pw_filter->filter, &filter)) {
1618e8de3701SAlexandre Bounine 			rio_mport_delete_pw_filter(pw_filter);
1619e8de3701SAlexandre Bounine 			ret = 0;
1620e8de3701SAlexandre Bounine 			break;
1621e8de3701SAlexandre Bounine 		}
1622e8de3701SAlexandre Bounine 	}
1623e8de3701SAlexandre Bounine 
1624e8de3701SAlexandre Bounine 	if (list_empty(&md->portwrites))
1625e8de3701SAlexandre Bounine 		hdel = 1;
1626e8de3701SAlexandre Bounine 	spin_unlock_irqrestore(&md->pw_lock, flags);
1627e8de3701SAlexandre Bounine 
1628e8de3701SAlexandre Bounine 	if (hdel) {
1629e8de3701SAlexandre Bounine 		rio_del_mport_pw_handler(md->mport, priv->md,
1630e8de3701SAlexandre Bounine 					 rio_mport_pw_handler);
1631e8de3701SAlexandre Bounine 		rio_pw_enable(md->mport, 0);
1632e8de3701SAlexandre Bounine 	}
1633e8de3701SAlexandre Bounine 
1634e8de3701SAlexandre Bounine 	return ret;
1635e8de3701SAlexandre Bounine }
1636e8de3701SAlexandre Bounine 
1637e8de3701SAlexandre Bounine /*
1638e8de3701SAlexandre Bounine  * rio_release_dev - release routine for kernel RIO device object
1639e8de3701SAlexandre Bounine  * @dev: kernel device object associated with a RIO device structure
1640e8de3701SAlexandre Bounine  *
1641e8de3701SAlexandre Bounine  * Frees a RIO device struct associated a RIO device struct.
1642e8de3701SAlexandre Bounine  * The RIO device struct is freed.
1643e8de3701SAlexandre Bounine  */
1644e8de3701SAlexandre Bounine static void rio_release_dev(struct device *dev)
1645e8de3701SAlexandre Bounine {
1646e8de3701SAlexandre Bounine 	struct rio_dev *rdev;
1647e8de3701SAlexandre Bounine 
1648e8de3701SAlexandre Bounine 	rdev = to_rio_dev(dev);
1649e8de3701SAlexandre Bounine 	pr_info(DRV_PREFIX "%s: %s\n", __func__, rio_name(rdev));
1650e8de3701SAlexandre Bounine 	kfree(rdev);
1651e8de3701SAlexandre Bounine }
1652e8de3701SAlexandre Bounine 
1653e8de3701SAlexandre Bounine 
1654e8de3701SAlexandre Bounine static void rio_release_net(struct device *dev)
1655e8de3701SAlexandre Bounine {
1656e8de3701SAlexandre Bounine 	struct rio_net *net;
1657e8de3701SAlexandre Bounine 
1658e8de3701SAlexandre Bounine 	net = to_rio_net(dev);
1659e8de3701SAlexandre Bounine 	rmcd_debug(RDEV, "net_%d", net->id);
1660e8de3701SAlexandre Bounine 	kfree(net);
1661e8de3701SAlexandre Bounine }
1662e8de3701SAlexandre Bounine 
1663e8de3701SAlexandre Bounine 
1664e8de3701SAlexandre Bounine /*
1665e8de3701SAlexandre Bounine  * rio_mport_add_riodev - creates a kernel RIO device object
1666e8de3701SAlexandre Bounine  *
1667e8de3701SAlexandre Bounine  * Allocates a RIO device data structure and initializes required fields based
1668e8de3701SAlexandre Bounine  * on device's configuration space contents.
1669e8de3701SAlexandre Bounine  * If the device has switch capabilities, then a switch specific portion is
1670e8de3701SAlexandre Bounine  * allocated and configured.
1671e8de3701SAlexandre Bounine  */
1672e8de3701SAlexandre Bounine static int rio_mport_add_riodev(struct mport_cdev_priv *priv,
1673e8de3701SAlexandre Bounine 				   void __user *arg)
1674e8de3701SAlexandre Bounine {
1675e8de3701SAlexandre Bounine 	struct mport_dev *md = priv->md;
1676e8de3701SAlexandre Bounine 	struct rio_rdev_info dev_info;
1677e8de3701SAlexandre Bounine 	struct rio_dev *rdev;
1678e8de3701SAlexandre Bounine 	struct rio_switch *rswitch = NULL;
1679e8de3701SAlexandre Bounine 	struct rio_mport *mport;
1680e8de3701SAlexandre Bounine 	size_t size;
1681e8de3701SAlexandre Bounine 	u32 rval;
1682e8de3701SAlexandre Bounine 	u32 swpinfo = 0;
1683e8de3701SAlexandre Bounine 	u16 destid;
1684e8de3701SAlexandre Bounine 	u8 hopcount;
1685e8de3701SAlexandre Bounine 	int err;
1686e8de3701SAlexandre Bounine 
1687e8de3701SAlexandre Bounine 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1688e8de3701SAlexandre Bounine 		return -EFAULT;
1689e8de3701SAlexandre Bounine 
1690e8de3701SAlexandre Bounine 	rmcd_debug(RDEV, "name:%s ct:0x%x did:0x%x hc:0x%x", dev_info.name,
1691e8de3701SAlexandre Bounine 		   dev_info.comptag, dev_info.destid, dev_info.hopcount);
1692e8de3701SAlexandre Bounine 
1693e8de3701SAlexandre Bounine 	if (bus_find_device_by_name(&rio_bus_type, NULL, dev_info.name)) {
1694e8de3701SAlexandre Bounine 		rmcd_debug(RDEV, "device %s already exists", dev_info.name);
1695e8de3701SAlexandre Bounine 		return -EEXIST;
1696e8de3701SAlexandre Bounine 	}
1697e8de3701SAlexandre Bounine 
16984e1016daSAlexandre Bounine 	size = sizeof(*rdev);
1699e8de3701SAlexandre Bounine 	mport = md->mport;
17004e1016daSAlexandre Bounine 	destid = dev_info.destid;
17014e1016daSAlexandre Bounine 	hopcount = dev_info.hopcount;
1702e8de3701SAlexandre Bounine 
1703e8de3701SAlexandre Bounine 	if (rio_mport_read_config_32(mport, destid, hopcount,
1704e8de3701SAlexandre Bounine 				     RIO_PEF_CAR, &rval))
1705e8de3701SAlexandre Bounine 		return -EIO;
1706e8de3701SAlexandre Bounine 
1707e8de3701SAlexandre Bounine 	if (rval & RIO_PEF_SWITCH) {
1708e8de3701SAlexandre Bounine 		rio_mport_read_config_32(mport, destid, hopcount,
1709e8de3701SAlexandre Bounine 					 RIO_SWP_INFO_CAR, &swpinfo);
1710e8de3701SAlexandre Bounine 		size += (RIO_GET_TOTAL_PORTS(swpinfo) *
1711e8de3701SAlexandre Bounine 			 sizeof(rswitch->nextdev[0])) + sizeof(*rswitch);
1712e8de3701SAlexandre Bounine 	}
1713e8de3701SAlexandre Bounine 
1714e8de3701SAlexandre Bounine 	rdev = kzalloc(size, GFP_KERNEL);
1715e8de3701SAlexandre Bounine 	if (rdev == NULL)
1716e8de3701SAlexandre Bounine 		return -ENOMEM;
1717e8de3701SAlexandre Bounine 
1718e8de3701SAlexandre Bounine 	if (mport->net == NULL) {
1719e8de3701SAlexandre Bounine 		struct rio_net *net;
1720e8de3701SAlexandre Bounine 
1721e8de3701SAlexandre Bounine 		net = rio_alloc_net(mport);
1722e8de3701SAlexandre Bounine 		if (!net) {
1723e8de3701SAlexandre Bounine 			err = -ENOMEM;
1724e8de3701SAlexandre Bounine 			rmcd_debug(RDEV, "failed to allocate net object");
1725e8de3701SAlexandre Bounine 			goto cleanup;
1726e8de3701SAlexandre Bounine 		}
1727e8de3701SAlexandre Bounine 
1728e8de3701SAlexandre Bounine 		net->id = mport->id;
1729e8de3701SAlexandre Bounine 		net->hport = mport;
1730e8de3701SAlexandre Bounine 		dev_set_name(&net->dev, "rnet_%d", net->id);
1731e8de3701SAlexandre Bounine 		net->dev.parent = &mport->dev;
1732e8de3701SAlexandre Bounine 		net->dev.release = rio_release_net;
1733e8de3701SAlexandre Bounine 		err = rio_add_net(net);
1734e8de3701SAlexandre Bounine 		if (err) {
1735e8de3701SAlexandre Bounine 			rmcd_debug(RDEV, "failed to register net, err=%d", err);
1736e8de3701SAlexandre Bounine 			kfree(net);
1737e8de3701SAlexandre Bounine 			goto cleanup;
1738e8de3701SAlexandre Bounine 		}
1739e8de3701SAlexandre Bounine 	}
1740e8de3701SAlexandre Bounine 
1741e8de3701SAlexandre Bounine 	rdev->net = mport->net;
1742e8de3701SAlexandre Bounine 	rdev->pef = rval;
1743e8de3701SAlexandre Bounine 	rdev->swpinfo = swpinfo;
1744e8de3701SAlexandre Bounine 	rio_mport_read_config_32(mport, destid, hopcount,
1745e8de3701SAlexandre Bounine 				 RIO_DEV_ID_CAR, &rval);
1746e8de3701SAlexandre Bounine 	rdev->did = rval >> 16;
1747e8de3701SAlexandre Bounine 	rdev->vid = rval & 0xffff;
1748e8de3701SAlexandre Bounine 	rio_mport_read_config_32(mport, destid, hopcount, RIO_DEV_INFO_CAR,
1749e8de3701SAlexandre Bounine 				 &rdev->device_rev);
1750e8de3701SAlexandre Bounine 	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_ID_CAR,
1751e8de3701SAlexandre Bounine 				 &rval);
1752e8de3701SAlexandre Bounine 	rdev->asm_did = rval >> 16;
1753e8de3701SAlexandre Bounine 	rdev->asm_vid = rval & 0xffff;
1754e8de3701SAlexandre Bounine 	rio_mport_read_config_32(mport, destid, hopcount, RIO_ASM_INFO_CAR,
1755e8de3701SAlexandre Bounine 				 &rval);
1756e8de3701SAlexandre Bounine 	rdev->asm_rev = rval >> 16;
1757e8de3701SAlexandre Bounine 
1758e8de3701SAlexandre Bounine 	if (rdev->pef & RIO_PEF_EXT_FEATURES) {
1759e8de3701SAlexandre Bounine 		rdev->efptr = rval & 0xffff;
1760e8de3701SAlexandre Bounine 		rdev->phys_efptr = rio_mport_get_physefb(mport, 0, destid,
17611ae842deSAlexandre Bounine 						hopcount, &rdev->phys_rmap);
1762e8de3701SAlexandre Bounine 
1763e8de3701SAlexandre Bounine 		rdev->em_efptr = rio_mport_get_feature(mport, 0, destid,
1764e8de3701SAlexandre Bounine 						hopcount, RIO_EFB_ERR_MGMNT);
1765e8de3701SAlexandre Bounine 	}
1766e8de3701SAlexandre Bounine 
1767e8de3701SAlexandre Bounine 	rio_mport_read_config_32(mport, destid, hopcount, RIO_SRC_OPS_CAR,
1768e8de3701SAlexandre Bounine 				 &rdev->src_ops);
1769e8de3701SAlexandre Bounine 	rio_mport_read_config_32(mport, destid, hopcount, RIO_DST_OPS_CAR,
1770e8de3701SAlexandre Bounine 				 &rdev->dst_ops);
1771e8de3701SAlexandre Bounine 
1772e8de3701SAlexandre Bounine 	rdev->comp_tag = dev_info.comptag;
1773e8de3701SAlexandre Bounine 	rdev->destid = destid;
1774e8de3701SAlexandre Bounine 	/* hopcount is stored as specified by a caller, regardles of EP or SW */
1775e8de3701SAlexandre Bounine 	rdev->hopcount = hopcount;
1776e8de3701SAlexandre Bounine 
1777e8de3701SAlexandre Bounine 	if (rdev->pef & RIO_PEF_SWITCH) {
1778e8de3701SAlexandre Bounine 		rswitch = rdev->rswitch;
1779e8de3701SAlexandre Bounine 		rswitch->route_table = NULL;
1780e8de3701SAlexandre Bounine 	}
1781e8de3701SAlexandre Bounine 
1782e8de3701SAlexandre Bounine 	if (strlen(dev_info.name))
1783e8de3701SAlexandre Bounine 		dev_set_name(&rdev->dev, "%s", dev_info.name);
1784e8de3701SAlexandre Bounine 	else if (rdev->pef & RIO_PEF_SWITCH)
1785e8de3701SAlexandre Bounine 		dev_set_name(&rdev->dev, "%02x:s:%04x", mport->id,
1786e8de3701SAlexandre Bounine 			     rdev->comp_tag & RIO_CTAG_UDEVID);
1787e8de3701SAlexandre Bounine 	else
1788e8de3701SAlexandre Bounine 		dev_set_name(&rdev->dev, "%02x:e:%04x", mport->id,
1789e8de3701SAlexandre Bounine 			     rdev->comp_tag & RIO_CTAG_UDEVID);
1790e8de3701SAlexandre Bounine 
1791e8de3701SAlexandre Bounine 	INIT_LIST_HEAD(&rdev->net_list);
1792e8de3701SAlexandre Bounine 	rdev->dev.parent = &mport->net->dev;
1793e8de3701SAlexandre Bounine 	rio_attach_device(rdev);
1794e8de3701SAlexandre Bounine 	rdev->dev.release = rio_release_dev;
1795e8de3701SAlexandre Bounine 
1796e8de3701SAlexandre Bounine 	if (rdev->dst_ops & RIO_DST_OPS_DOORBELL)
1797e8de3701SAlexandre Bounine 		rio_init_dbell_res(&rdev->riores[RIO_DOORBELL_RESOURCE],
1798e8de3701SAlexandre Bounine 				   0, 0xffff);
1799e8de3701SAlexandre Bounine 	err = rio_add_device(rdev);
1800e8de3701SAlexandre Bounine 	if (err)
1801e8de3701SAlexandre Bounine 		goto cleanup;
1802e8de3701SAlexandre Bounine 	rio_dev_get(rdev);
1803e8de3701SAlexandre Bounine 
1804e8de3701SAlexandre Bounine 	return 0;
1805e8de3701SAlexandre Bounine cleanup:
1806e8de3701SAlexandre Bounine 	kfree(rdev);
1807e8de3701SAlexandre Bounine 	return err;
1808e8de3701SAlexandre Bounine }
1809e8de3701SAlexandre Bounine 
1810e8de3701SAlexandre Bounine static int rio_mport_del_riodev(struct mport_cdev_priv *priv, void __user *arg)
1811e8de3701SAlexandre Bounine {
1812e8de3701SAlexandre Bounine 	struct rio_rdev_info dev_info;
1813e8de3701SAlexandre Bounine 	struct rio_dev *rdev = NULL;
1814e8de3701SAlexandre Bounine 	struct device  *dev;
1815e8de3701SAlexandre Bounine 	struct rio_mport *mport;
1816e8de3701SAlexandre Bounine 	struct rio_net *net;
1817e8de3701SAlexandre Bounine 
1818e8de3701SAlexandre Bounine 	if (copy_from_user(&dev_info, arg, sizeof(dev_info)))
1819e8de3701SAlexandre Bounine 		return -EFAULT;
1820e8de3701SAlexandre Bounine 
1821e8de3701SAlexandre Bounine 	mport = priv->md->mport;
1822e8de3701SAlexandre Bounine 
1823e8de3701SAlexandre Bounine 	/* If device name is specified, removal by name has priority */
1824e8de3701SAlexandre Bounine 	if (strlen(dev_info.name)) {
1825e8de3701SAlexandre Bounine 		dev = bus_find_device_by_name(&rio_bus_type, NULL,
1826e8de3701SAlexandre Bounine 					      dev_info.name);
1827e8de3701SAlexandre Bounine 		if (dev)
1828e8de3701SAlexandre Bounine 			rdev = to_rio_dev(dev);
1829e8de3701SAlexandre Bounine 	} else {
1830e8de3701SAlexandre Bounine 		do {
1831e8de3701SAlexandre Bounine 			rdev = rio_get_comptag(dev_info.comptag, rdev);
1832e8de3701SAlexandre Bounine 			if (rdev && rdev->dev.parent == &mport->net->dev &&
18334e1016daSAlexandre Bounine 			    rdev->destid == dev_info.destid &&
18344e1016daSAlexandre Bounine 			    rdev->hopcount == dev_info.hopcount)
1835e8de3701SAlexandre Bounine 				break;
1836e8de3701SAlexandre Bounine 		} while (rdev);
1837e8de3701SAlexandre Bounine 	}
1838e8de3701SAlexandre Bounine 
1839e8de3701SAlexandre Bounine 	if (!rdev) {
1840e8de3701SAlexandre Bounine 		rmcd_debug(RDEV,
1841e8de3701SAlexandre Bounine 			"device name:%s ct:0x%x did:0x%x hc:0x%x not found",
1842e8de3701SAlexandre Bounine 			dev_info.name, dev_info.comptag, dev_info.destid,
1843e8de3701SAlexandre Bounine 			dev_info.hopcount);
1844e8de3701SAlexandre Bounine 		return -ENODEV;
1845e8de3701SAlexandre Bounine 	}
1846e8de3701SAlexandre Bounine 
1847e8de3701SAlexandre Bounine 	net = rdev->net;
1848e8de3701SAlexandre Bounine 	rio_dev_put(rdev);
1849e8de3701SAlexandre Bounine 	rio_del_device(rdev, RIO_DEVICE_SHUTDOWN);
1850e8de3701SAlexandre Bounine 
1851e8de3701SAlexandre Bounine 	if (list_empty(&net->devices)) {
1852e8de3701SAlexandre Bounine 		rio_free_net(net);
1853e8de3701SAlexandre Bounine 		mport->net = NULL;
1854e8de3701SAlexandre Bounine 	}
1855e8de3701SAlexandre Bounine 
1856e8de3701SAlexandre Bounine 	return 0;
1857e8de3701SAlexandre Bounine }
1858e8de3701SAlexandre Bounine 
1859e8de3701SAlexandre Bounine /*
1860e8de3701SAlexandre Bounine  * Mport cdev management
1861e8de3701SAlexandre Bounine  */
1862e8de3701SAlexandre Bounine 
1863e8de3701SAlexandre Bounine /*
1864e8de3701SAlexandre Bounine  * mport_cdev_open() - Open character device (mport)
1865e8de3701SAlexandre Bounine  */
1866e8de3701SAlexandre Bounine static int mport_cdev_open(struct inode *inode, struct file *filp)
1867e8de3701SAlexandre Bounine {
1868e8de3701SAlexandre Bounine 	int ret;
1869e8de3701SAlexandre Bounine 	int minor = iminor(inode);
1870e8de3701SAlexandre Bounine 	struct mport_dev *chdev;
1871e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv;
1872e8de3701SAlexandre Bounine 
1873e8de3701SAlexandre Bounine 	/* Test for valid device */
1874e8de3701SAlexandre Bounine 	if (minor >= RIO_MAX_MPORTS) {
1875e8de3701SAlexandre Bounine 		rmcd_error("Invalid minor device number");
1876e8de3701SAlexandre Bounine 		return -EINVAL;
1877e8de3701SAlexandre Bounine 	}
1878e8de3701SAlexandre Bounine 
1879e8de3701SAlexandre Bounine 	chdev = container_of(inode->i_cdev, struct mport_dev, cdev);
1880e8de3701SAlexandre Bounine 
1881e8de3701SAlexandre Bounine 	rmcd_debug(INIT, "%s filp=%p", dev_name(&chdev->dev), filp);
1882e8de3701SAlexandre Bounine 
1883e8de3701SAlexandre Bounine 	if (atomic_read(&chdev->active) == 0)
1884e8de3701SAlexandre Bounine 		return -ENODEV;
1885e8de3701SAlexandre Bounine 
1886e8de3701SAlexandre Bounine 	get_device(&chdev->dev);
1887e8de3701SAlexandre Bounine 
1888e8de3701SAlexandre Bounine 	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
1889e8de3701SAlexandre Bounine 	if (!priv) {
1890e8de3701SAlexandre Bounine 		put_device(&chdev->dev);
1891e8de3701SAlexandre Bounine 		return -ENOMEM;
1892e8de3701SAlexandre Bounine 	}
1893e8de3701SAlexandre Bounine 
1894e8de3701SAlexandre Bounine 	priv->md = chdev;
1895e8de3701SAlexandre Bounine 
1896e8de3701SAlexandre Bounine 	mutex_lock(&chdev->file_mutex);
1897e8de3701SAlexandre Bounine 	list_add_tail(&priv->list, &chdev->file_list);
1898e8de3701SAlexandre Bounine 	mutex_unlock(&chdev->file_mutex);
1899e8de3701SAlexandre Bounine 
1900e8de3701SAlexandre Bounine 	INIT_LIST_HEAD(&priv->db_filters);
1901e8de3701SAlexandre Bounine 	INIT_LIST_HEAD(&priv->pw_filters);
1902e8de3701SAlexandre Bounine 	spin_lock_init(&priv->fifo_lock);
1903e8de3701SAlexandre Bounine 	init_waitqueue_head(&priv->event_rx_wait);
1904e8de3701SAlexandre Bounine 	ret = kfifo_alloc(&priv->event_fifo,
1905e8de3701SAlexandre Bounine 			  sizeof(struct rio_event) * MPORT_EVENT_DEPTH,
1906e8de3701SAlexandre Bounine 			  GFP_KERNEL);
1907e8de3701SAlexandre Bounine 	if (ret < 0) {
1908e8de3701SAlexandre Bounine 		dev_err(&chdev->dev, DRV_NAME ": kfifo_alloc failed\n");
1909e8de3701SAlexandre Bounine 		ret = -ENOMEM;
1910e8de3701SAlexandre Bounine 		goto err_fifo;
1911e8de3701SAlexandre Bounine 	}
1912e8de3701SAlexandre Bounine 
1913e8de3701SAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1914e8de3701SAlexandre Bounine 	INIT_LIST_HEAD(&priv->async_list);
1915e8de3701SAlexandre Bounine 	spin_lock_init(&priv->req_lock);
1916e8de3701SAlexandre Bounine 	mutex_init(&priv->dma_lock);
1917e8de3701SAlexandre Bounine #endif
1918e8de3701SAlexandre Bounine 
1919e8de3701SAlexandre Bounine 	filp->private_data = priv;
1920e8de3701SAlexandre Bounine 	goto out;
1921e8de3701SAlexandre Bounine err_fifo:
1922e8de3701SAlexandre Bounine 	kfree(priv);
1923e8de3701SAlexandre Bounine out:
1924e8de3701SAlexandre Bounine 	return ret;
1925e8de3701SAlexandre Bounine }
1926e8de3701SAlexandre Bounine 
1927e8de3701SAlexandre Bounine static int mport_cdev_fasync(int fd, struct file *filp, int mode)
1928e8de3701SAlexandre Bounine {
1929e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
1930e8de3701SAlexandre Bounine 
1931e8de3701SAlexandre Bounine 	return fasync_helper(fd, filp, mode, &priv->async_queue);
1932e8de3701SAlexandre Bounine }
1933e8de3701SAlexandre Bounine 
1934e8de3701SAlexandre Bounine #ifdef CONFIG_RAPIDIO_DMA_ENGINE
1935e8de3701SAlexandre Bounine static void mport_cdev_release_dma(struct file *filp)
1936e8de3701SAlexandre Bounine {
1937e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
1938e8de3701SAlexandre Bounine 	struct mport_dev *md;
1939e8de3701SAlexandre Bounine 	struct mport_dma_req *req, *req_next;
1940e8de3701SAlexandre Bounine 	unsigned long tmo = msecs_to_jiffies(dma_timeout);
1941e8de3701SAlexandre Bounine 	long wret;
1942e8de3701SAlexandre Bounine 	LIST_HEAD(list);
1943e8de3701SAlexandre Bounine 
1944e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "from filp=%p %s(%d)",
1945e8de3701SAlexandre Bounine 		   filp, current->comm, task_pid_nr(current));
1946e8de3701SAlexandre Bounine 
1947e8de3701SAlexandre Bounine 	if (!priv->dmach) {
1948e8de3701SAlexandre Bounine 		rmcd_debug(EXIT, "No DMA channel for filp=%p", filp);
1949e8de3701SAlexandre Bounine 		return;
1950e8de3701SAlexandre Bounine 	}
1951e8de3701SAlexandre Bounine 
1952e8de3701SAlexandre Bounine 	md = priv->md;
1953e8de3701SAlexandre Bounine 
1954e8de3701SAlexandre Bounine 	spin_lock(&priv->req_lock);
1955e8de3701SAlexandre Bounine 	if (!list_empty(&priv->async_list)) {
1956e8de3701SAlexandre Bounine 		rmcd_debug(EXIT, "async list not empty filp=%p %s(%d)",
1957e8de3701SAlexandre Bounine 			   filp, current->comm, task_pid_nr(current));
1958e8de3701SAlexandre Bounine 		list_splice_init(&priv->async_list, &list);
1959e8de3701SAlexandre Bounine 	}
1960e8de3701SAlexandre Bounine 	spin_unlock(&priv->req_lock);
1961e8de3701SAlexandre Bounine 
1962e8de3701SAlexandre Bounine 	if (!list_empty(&list)) {
1963e8de3701SAlexandre Bounine 		rmcd_debug(EXIT, "temp list not empty");
1964e8de3701SAlexandre Bounine 		list_for_each_entry_safe(req, req_next, &list, node) {
1965e8de3701SAlexandre Bounine 			rmcd_debug(EXIT, "free req->filp=%p cookie=%d compl=%s",
1966e8de3701SAlexandre Bounine 				   req->filp, req->cookie,
1967e8de3701SAlexandre Bounine 				   completion_done(&req->req_comp)?"yes":"no");
1968e8de3701SAlexandre Bounine 			list_del(&req->node);
1969bbd876adSIoan Nicu 			kref_put(&req->refcount, dma_req_free);
1970e8de3701SAlexandre Bounine 		}
1971e8de3701SAlexandre Bounine 	}
1972e8de3701SAlexandre Bounine 
1973e8de3701SAlexandre Bounine 	put_dma_channel(priv);
1974e8de3701SAlexandre Bounine 	wret = wait_for_completion_interruptible_timeout(&priv->comp, tmo);
1975e8de3701SAlexandre Bounine 
1976e8de3701SAlexandre Bounine 	if (wret <= 0) {
1977e8de3701SAlexandre Bounine 		rmcd_error("%s(%d) failed waiting for DMA release err=%ld",
1978e8de3701SAlexandre Bounine 			current->comm, task_pid_nr(current), wret);
1979e8de3701SAlexandre Bounine 	}
1980e8de3701SAlexandre Bounine 
1981e8de3701SAlexandre Bounine 	if (priv->dmach != priv->md->dma_chan) {
1982e8de3701SAlexandre Bounine 		rmcd_debug(EXIT, "Release DMA channel for filp=%p %s(%d)",
1983e8de3701SAlexandre Bounine 			   filp, current->comm, task_pid_nr(current));
1984e8de3701SAlexandre Bounine 		rio_release_dma(priv->dmach);
1985e8de3701SAlexandre Bounine 	} else {
1986e8de3701SAlexandre Bounine 		rmcd_debug(EXIT, "Adjust default DMA channel refcount");
1987e8de3701SAlexandre Bounine 		kref_put(&md->dma_ref, mport_release_def_dma);
1988e8de3701SAlexandre Bounine 	}
1989e8de3701SAlexandre Bounine 
1990e8de3701SAlexandre Bounine 	priv->dmach = NULL;
1991e8de3701SAlexandre Bounine }
1992e8de3701SAlexandre Bounine #else
1993e8de3701SAlexandre Bounine #define mport_cdev_release_dma(priv) do {} while (0)
1994e8de3701SAlexandre Bounine #endif
1995e8de3701SAlexandre Bounine 
1996e8de3701SAlexandre Bounine /*
1997e8de3701SAlexandre Bounine  * mport_cdev_release() - Release character device
1998e8de3701SAlexandre Bounine  */
1999e8de3701SAlexandre Bounine static int mport_cdev_release(struct inode *inode, struct file *filp)
2000e8de3701SAlexandre Bounine {
2001e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
2002e8de3701SAlexandre Bounine 	struct mport_dev *chdev;
2003e8de3701SAlexandre Bounine 	struct rio_mport_pw_filter *pw_filter, *pw_filter_next;
2004e8de3701SAlexandre Bounine 	struct rio_mport_db_filter *db_filter, *db_filter_next;
2005e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map, *_map;
2006e8de3701SAlexandre Bounine 	unsigned long flags;
2007e8de3701SAlexandre Bounine 
2008e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "%s filp=%p", dev_name(&priv->md->dev), filp);
2009e8de3701SAlexandre Bounine 
2010e8de3701SAlexandre Bounine 	chdev = priv->md;
2011e8de3701SAlexandre Bounine 	mport_cdev_release_dma(filp);
2012e8de3701SAlexandre Bounine 
2013e8de3701SAlexandre Bounine 	priv->event_mask = 0;
2014e8de3701SAlexandre Bounine 
2015e8de3701SAlexandre Bounine 	spin_lock_irqsave(&chdev->pw_lock, flags);
2016e8de3701SAlexandre Bounine 	if (!list_empty(&priv->pw_filters)) {
2017e8de3701SAlexandre Bounine 		list_for_each_entry_safe(pw_filter, pw_filter_next,
2018e8de3701SAlexandre Bounine 					 &priv->pw_filters, priv_node)
2019e8de3701SAlexandre Bounine 			rio_mport_delete_pw_filter(pw_filter);
2020e8de3701SAlexandre Bounine 	}
2021e8de3701SAlexandre Bounine 	spin_unlock_irqrestore(&chdev->pw_lock, flags);
2022e8de3701SAlexandre Bounine 
2023e8de3701SAlexandre Bounine 	spin_lock_irqsave(&chdev->db_lock, flags);
2024e8de3701SAlexandre Bounine 	list_for_each_entry_safe(db_filter, db_filter_next,
2025e8de3701SAlexandre Bounine 				 &priv->db_filters, priv_node) {
2026e8de3701SAlexandre Bounine 		rio_mport_delete_db_filter(db_filter);
2027e8de3701SAlexandre Bounine 	}
2028e8de3701SAlexandre Bounine 	spin_unlock_irqrestore(&chdev->db_lock, flags);
2029e8de3701SAlexandre Bounine 
2030e8de3701SAlexandre Bounine 	kfifo_free(&priv->event_fifo);
2031e8de3701SAlexandre Bounine 
2032e8de3701SAlexandre Bounine 	mutex_lock(&chdev->buf_mutex);
2033e8de3701SAlexandre Bounine 	list_for_each_entry_safe(map, _map, &chdev->mappings, node) {
2034e8de3701SAlexandre Bounine 		if (map->filp == filp) {
2035e8de3701SAlexandre Bounine 			rmcd_debug(EXIT, "release mapping %p filp=%p",
2036e8de3701SAlexandre Bounine 				   map->virt_addr, filp);
2037e8de3701SAlexandre Bounine 			kref_put(&map->ref, mport_release_mapping);
2038e8de3701SAlexandre Bounine 		}
2039e8de3701SAlexandre Bounine 	}
2040e8de3701SAlexandre Bounine 	mutex_unlock(&chdev->buf_mutex);
2041e8de3701SAlexandre Bounine 
2042e8de3701SAlexandre Bounine 	mport_cdev_fasync(-1, filp, 0);
2043e8de3701SAlexandre Bounine 	filp->private_data = NULL;
2044e8de3701SAlexandre Bounine 	mutex_lock(&chdev->file_mutex);
2045e8de3701SAlexandre Bounine 	list_del(&priv->list);
2046e8de3701SAlexandre Bounine 	mutex_unlock(&chdev->file_mutex);
2047e8de3701SAlexandre Bounine 	put_device(&chdev->dev);
2048e8de3701SAlexandre Bounine 	kfree(priv);
2049e8de3701SAlexandre Bounine 	return 0;
2050e8de3701SAlexandre Bounine }
2051e8de3701SAlexandre Bounine 
2052e8de3701SAlexandre Bounine /*
2053e8de3701SAlexandre Bounine  * mport_cdev_ioctl() - IOCTLs for character device
2054e8de3701SAlexandre Bounine  */
2055e8de3701SAlexandre Bounine static long mport_cdev_ioctl(struct file *filp,
2056e8de3701SAlexandre Bounine 		unsigned int cmd, unsigned long arg)
2057e8de3701SAlexandre Bounine {
2058e8de3701SAlexandre Bounine 	int err = -EINVAL;
2059e8de3701SAlexandre Bounine 	struct mport_cdev_priv *data = filp->private_data;
2060e8de3701SAlexandre Bounine 	struct mport_dev *md = data->md;
2061e8de3701SAlexandre Bounine 
2062e8de3701SAlexandre Bounine 	if (atomic_read(&md->active) == 0)
2063e8de3701SAlexandre Bounine 		return -ENODEV;
2064e8de3701SAlexandre Bounine 
2065e8de3701SAlexandre Bounine 	switch (cmd) {
2066e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_READ_LOCAL:
2067e8de3701SAlexandre Bounine 		return rio_mport_maint_rd(data, (void __user *)arg, 1);
2068e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_WRITE_LOCAL:
2069e8de3701SAlexandre Bounine 		return rio_mport_maint_wr(data, (void __user *)arg, 1);
2070e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_READ_REMOTE:
2071e8de3701SAlexandre Bounine 		return rio_mport_maint_rd(data, (void __user *)arg, 0);
2072e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_WRITE_REMOTE:
2073e8de3701SAlexandre Bounine 		return rio_mport_maint_wr(data, (void __user *)arg, 0);
2074e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_HDID_SET:
2075e8de3701SAlexandre Bounine 		return maint_hdid_set(data, (void __user *)arg);
2076e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_COMPTAG_SET:
2077e8de3701SAlexandre Bounine 		return maint_comptag_set(data, (void __user *)arg);
2078e8de3701SAlexandre Bounine 	case RIO_MPORT_MAINT_PORT_IDX_GET:
2079e8de3701SAlexandre Bounine 		return maint_port_idx_get(data, (void __user *)arg);
2080e8de3701SAlexandre Bounine 	case RIO_MPORT_GET_PROPERTIES:
2081e8de3701SAlexandre Bounine 		md->properties.hdid = md->mport->host_deviceid;
20824e1016daSAlexandre Bounine 		if (copy_to_user((void __user *)arg, &(md->properties),
20834e1016daSAlexandre Bounine 				 sizeof(md->properties)))
2084e8de3701SAlexandre Bounine 			return -EFAULT;
2085e8de3701SAlexandre Bounine 		return 0;
2086e8de3701SAlexandre Bounine 	case RIO_ENABLE_DOORBELL_RANGE:
2087e8de3701SAlexandre Bounine 		return rio_mport_add_db_filter(data, (void __user *)arg);
2088e8de3701SAlexandre Bounine 	case RIO_DISABLE_DOORBELL_RANGE:
2089e8de3701SAlexandre Bounine 		return rio_mport_remove_db_filter(data, (void __user *)arg);
2090e8de3701SAlexandre Bounine 	case RIO_ENABLE_PORTWRITE_RANGE:
2091e8de3701SAlexandre Bounine 		return rio_mport_add_pw_filter(data, (void __user *)arg);
2092e8de3701SAlexandre Bounine 	case RIO_DISABLE_PORTWRITE_RANGE:
2093e8de3701SAlexandre Bounine 		return rio_mport_remove_pw_filter(data, (void __user *)arg);
2094e8de3701SAlexandre Bounine 	case RIO_SET_EVENT_MASK:
20954e1016daSAlexandre Bounine 		data->event_mask = (u32)arg;
2096e8de3701SAlexandre Bounine 		return 0;
2097e8de3701SAlexandre Bounine 	case RIO_GET_EVENT_MASK:
2098e8de3701SAlexandre Bounine 		if (copy_to_user((void __user *)arg, &data->event_mask,
20994e1016daSAlexandre Bounine 				    sizeof(u32)))
2100e8de3701SAlexandre Bounine 			return -EFAULT;
2101e8de3701SAlexandre Bounine 		return 0;
2102e8de3701SAlexandre Bounine 	case RIO_MAP_OUTBOUND:
2103e8de3701SAlexandre Bounine 		return rio_mport_obw_map(filp, (void __user *)arg);
2104e8de3701SAlexandre Bounine 	case RIO_MAP_INBOUND:
2105e8de3701SAlexandre Bounine 		return rio_mport_map_inbound(filp, (void __user *)arg);
2106e8de3701SAlexandre Bounine 	case RIO_UNMAP_OUTBOUND:
2107e8de3701SAlexandre Bounine 		return rio_mport_obw_free(filp, (void __user *)arg);
2108e8de3701SAlexandre Bounine 	case RIO_UNMAP_INBOUND:
2109e8de3701SAlexandre Bounine 		return rio_mport_inbound_free(filp, (void __user *)arg);
2110e8de3701SAlexandre Bounine 	case RIO_ALLOC_DMA:
2111e8de3701SAlexandre Bounine 		return rio_mport_alloc_dma(filp, (void __user *)arg);
2112e8de3701SAlexandre Bounine 	case RIO_FREE_DMA:
2113e8de3701SAlexandre Bounine 		return rio_mport_free_dma(filp, (void __user *)arg);
2114e8de3701SAlexandre Bounine 	case RIO_WAIT_FOR_ASYNC:
2115e8de3701SAlexandre Bounine 		return rio_mport_wait_for_async_dma(filp, (void __user *)arg);
2116e8de3701SAlexandre Bounine 	case RIO_TRANSFER:
2117e8de3701SAlexandre Bounine 		return rio_mport_transfer_ioctl(filp, (void __user *)arg);
2118e8de3701SAlexandre Bounine 	case RIO_DEV_ADD:
2119e8de3701SAlexandre Bounine 		return rio_mport_add_riodev(data, (void __user *)arg);
2120e8de3701SAlexandre Bounine 	case RIO_DEV_DEL:
2121e8de3701SAlexandre Bounine 		return rio_mport_del_riodev(data, (void __user *)arg);
2122e8de3701SAlexandre Bounine 	default:
2123e8de3701SAlexandre Bounine 		break;
2124e8de3701SAlexandre Bounine 	}
2125e8de3701SAlexandre Bounine 
2126e8de3701SAlexandre Bounine 	return err;
2127e8de3701SAlexandre Bounine }
2128e8de3701SAlexandre Bounine 
2129e8de3701SAlexandre Bounine /*
2130e8de3701SAlexandre Bounine  * mport_release_mapping - free mapping resources and info structure
2131e8de3701SAlexandre Bounine  * @ref: a pointer to the kref within struct rio_mport_mapping
2132e8de3701SAlexandre Bounine  *
2133e8de3701SAlexandre Bounine  * NOTE: Shall be called while holding buf_mutex.
2134e8de3701SAlexandre Bounine  */
2135e8de3701SAlexandre Bounine static void mport_release_mapping(struct kref *ref)
2136e8de3701SAlexandre Bounine {
2137e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map =
2138e8de3701SAlexandre Bounine 			container_of(ref, struct rio_mport_mapping, ref);
2139e8de3701SAlexandre Bounine 	struct rio_mport *mport = map->md->mport;
2140e8de3701SAlexandre Bounine 
2141e8de3701SAlexandre Bounine 	rmcd_debug(MMAP, "type %d mapping @ %p (phys = %pad) for %s",
2142e8de3701SAlexandre Bounine 		   map->dir, map->virt_addr,
2143e8de3701SAlexandre Bounine 		   &map->phys_addr, mport->name);
2144e8de3701SAlexandre Bounine 
2145e8de3701SAlexandre Bounine 	list_del(&map->node);
2146e8de3701SAlexandre Bounine 
2147e8de3701SAlexandre Bounine 	switch (map->dir) {
2148e8de3701SAlexandre Bounine 	case MAP_INBOUND:
2149e8de3701SAlexandre Bounine 		rio_unmap_inb_region(mport, map->phys_addr);
215092bf5016SGustavo A. R. Silva 		/* fall through */
2151e8de3701SAlexandre Bounine 	case MAP_DMA:
2152e8de3701SAlexandre Bounine 		dma_free_coherent(mport->dev.parent, map->size,
2153e8de3701SAlexandre Bounine 				  map->virt_addr, map->phys_addr);
2154e8de3701SAlexandre Bounine 		break;
2155e8de3701SAlexandre Bounine 	case MAP_OUTBOUND:
2156e8de3701SAlexandre Bounine 		rio_unmap_outb_region(mport, map->rioid, map->rio_addr);
2157e8de3701SAlexandre Bounine 		break;
2158e8de3701SAlexandre Bounine 	}
2159e8de3701SAlexandre Bounine 	kfree(map);
2160e8de3701SAlexandre Bounine }
2161e8de3701SAlexandre Bounine 
2162e8de3701SAlexandre Bounine static void mport_mm_open(struct vm_area_struct *vma)
2163e8de3701SAlexandre Bounine {
2164e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map = vma->vm_private_data;
2165e8de3701SAlexandre Bounine 
2166ea87b8e1SJoe Perches 	rmcd_debug(MMAP, "%pad", &map->phys_addr);
2167e8de3701SAlexandre Bounine 	kref_get(&map->ref);
2168e8de3701SAlexandre Bounine }
2169e8de3701SAlexandre Bounine 
2170e8de3701SAlexandre Bounine static void mport_mm_close(struct vm_area_struct *vma)
2171e8de3701SAlexandre Bounine {
2172e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map = vma->vm_private_data;
2173e8de3701SAlexandre Bounine 
2174ea87b8e1SJoe Perches 	rmcd_debug(MMAP, "%pad", &map->phys_addr);
2175e8de3701SAlexandre Bounine 	mutex_lock(&map->md->buf_mutex);
2176e8de3701SAlexandre Bounine 	kref_put(&map->ref, mport_release_mapping);
2177e8de3701SAlexandre Bounine 	mutex_unlock(&map->md->buf_mutex);
2178e8de3701SAlexandre Bounine }
2179e8de3701SAlexandre Bounine 
2180e8de3701SAlexandre Bounine static const struct vm_operations_struct vm_ops = {
2181e8de3701SAlexandre Bounine 	.open =	mport_mm_open,
2182e8de3701SAlexandre Bounine 	.close = mport_mm_close,
2183e8de3701SAlexandre Bounine };
2184e8de3701SAlexandre Bounine 
2185e8de3701SAlexandre Bounine static int mport_cdev_mmap(struct file *filp, struct vm_area_struct *vma)
2186e8de3701SAlexandre Bounine {
2187e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
2188e8de3701SAlexandre Bounine 	struct mport_dev *md;
2189e8de3701SAlexandre Bounine 	size_t size = vma->vm_end - vma->vm_start;
2190e8de3701SAlexandre Bounine 	dma_addr_t baddr;
2191e8de3701SAlexandre Bounine 	unsigned long offset;
2192e8de3701SAlexandre Bounine 	int found = 0, ret;
2193e8de3701SAlexandre Bounine 	struct rio_mport_mapping *map;
2194e8de3701SAlexandre Bounine 
2195e8de3701SAlexandre Bounine 	rmcd_debug(MMAP, "0x%x bytes at offset 0x%lx",
2196e8de3701SAlexandre Bounine 		   (unsigned int)size, vma->vm_pgoff);
2197e8de3701SAlexandre Bounine 
2198e8de3701SAlexandre Bounine 	md = priv->md;
2199e8de3701SAlexandre Bounine 	baddr = ((dma_addr_t)vma->vm_pgoff << PAGE_SHIFT);
2200e8de3701SAlexandre Bounine 
2201e8de3701SAlexandre Bounine 	mutex_lock(&md->buf_mutex);
2202e8de3701SAlexandre Bounine 	list_for_each_entry(map, &md->mappings, node) {
2203e8de3701SAlexandre Bounine 		if (baddr >= map->phys_addr &&
2204e8de3701SAlexandre Bounine 		    baddr < (map->phys_addr + map->size)) {
2205e8de3701SAlexandre Bounine 			found = 1;
2206e8de3701SAlexandre Bounine 			break;
2207e8de3701SAlexandre Bounine 		}
2208e8de3701SAlexandre Bounine 	}
2209e8de3701SAlexandre Bounine 	mutex_unlock(&md->buf_mutex);
2210e8de3701SAlexandre Bounine 
2211e8de3701SAlexandre Bounine 	if (!found)
2212e8de3701SAlexandre Bounine 		return -ENOMEM;
2213e8de3701SAlexandre Bounine 
2214e8de3701SAlexandre Bounine 	offset = baddr - map->phys_addr;
2215e8de3701SAlexandre Bounine 
2216e8de3701SAlexandre Bounine 	if (size + offset > map->size)
2217e8de3701SAlexandre Bounine 		return -EINVAL;
2218e8de3701SAlexandre Bounine 
2219e8de3701SAlexandre Bounine 	vma->vm_pgoff = offset >> PAGE_SHIFT;
2220e8de3701SAlexandre Bounine 	rmcd_debug(MMAP, "MMAP adjusted offset = 0x%lx", vma->vm_pgoff);
2221e8de3701SAlexandre Bounine 
2222e8de3701SAlexandre Bounine 	if (map->dir == MAP_INBOUND || map->dir == MAP_DMA)
2223e8de3701SAlexandre Bounine 		ret = dma_mmap_coherent(md->mport->dev.parent, vma,
2224e8de3701SAlexandre Bounine 				map->virt_addr, map->phys_addr, map->size);
2225e8de3701SAlexandre Bounine 	else if (map->dir == MAP_OUTBOUND) {
2226e8de3701SAlexandre Bounine 		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
2227e8de3701SAlexandre Bounine 		ret = vm_iomap_memory(vma, map->phys_addr, map->size);
2228e8de3701SAlexandre Bounine 	} else {
2229e8de3701SAlexandre Bounine 		rmcd_error("Attempt to mmap unsupported mapping type");
2230e8de3701SAlexandre Bounine 		ret = -EIO;
2231e8de3701SAlexandre Bounine 	}
2232e8de3701SAlexandre Bounine 
2233e8de3701SAlexandre Bounine 	if (!ret) {
2234e8de3701SAlexandre Bounine 		vma->vm_private_data = map;
2235e8de3701SAlexandre Bounine 		vma->vm_ops = &vm_ops;
2236e8de3701SAlexandre Bounine 		mport_mm_open(vma);
2237e8de3701SAlexandre Bounine 	} else {
2238e8de3701SAlexandre Bounine 		rmcd_error("MMAP exit with err=%d", ret);
2239e8de3701SAlexandre Bounine 	}
2240e8de3701SAlexandre Bounine 
2241e8de3701SAlexandre Bounine 	return ret;
2242e8de3701SAlexandre Bounine }
2243e8de3701SAlexandre Bounine 
2244afc9a42bSAl Viro static __poll_t mport_cdev_poll(struct file *filp, poll_table *wait)
2245e8de3701SAlexandre Bounine {
2246e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
2247e8de3701SAlexandre Bounine 
2248e8de3701SAlexandre Bounine 	poll_wait(filp, &priv->event_rx_wait, wait);
2249e8de3701SAlexandre Bounine 	if (kfifo_len(&priv->event_fifo))
2250a9a08845SLinus Torvalds 		return EPOLLIN | EPOLLRDNORM;
2251e8de3701SAlexandre Bounine 
2252e8de3701SAlexandre Bounine 	return 0;
2253e8de3701SAlexandre Bounine }
2254e8de3701SAlexandre Bounine 
2255e8de3701SAlexandre Bounine static ssize_t mport_read(struct file *filp, char __user *buf, size_t count,
2256e8de3701SAlexandre Bounine 			loff_t *ppos)
2257e8de3701SAlexandre Bounine {
2258e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
2259e8de3701SAlexandre Bounine 	int copied;
2260e8de3701SAlexandre Bounine 	ssize_t ret;
2261e8de3701SAlexandre Bounine 
2262e8de3701SAlexandre Bounine 	if (!count)
2263e8de3701SAlexandre Bounine 		return 0;
2264e8de3701SAlexandre Bounine 
2265e8de3701SAlexandre Bounine 	if (kfifo_is_empty(&priv->event_fifo) &&
2266e8de3701SAlexandre Bounine 	    (filp->f_flags & O_NONBLOCK))
2267e8de3701SAlexandre Bounine 		return -EAGAIN;
2268e8de3701SAlexandre Bounine 
2269e8de3701SAlexandre Bounine 	if (count % sizeof(struct rio_event))
2270e8de3701SAlexandre Bounine 		return -EINVAL;
2271e8de3701SAlexandre Bounine 
2272e8de3701SAlexandre Bounine 	ret = wait_event_interruptible(priv->event_rx_wait,
2273e8de3701SAlexandre Bounine 					kfifo_len(&priv->event_fifo) != 0);
2274e8de3701SAlexandre Bounine 	if (ret)
2275e8de3701SAlexandre Bounine 		return ret;
2276e8de3701SAlexandre Bounine 
2277e8de3701SAlexandre Bounine 	while (ret < count) {
2278e8de3701SAlexandre Bounine 		if (kfifo_to_user(&priv->event_fifo, buf,
2279e8de3701SAlexandre Bounine 		      sizeof(struct rio_event), &copied))
2280e8de3701SAlexandre Bounine 			return -EFAULT;
2281e8de3701SAlexandre Bounine 		ret += copied;
2282e8de3701SAlexandre Bounine 		buf += copied;
2283e8de3701SAlexandre Bounine 	}
2284e8de3701SAlexandre Bounine 
2285e8de3701SAlexandre Bounine 	return ret;
2286e8de3701SAlexandre Bounine }
2287e8de3701SAlexandre Bounine 
2288e8de3701SAlexandre Bounine static ssize_t mport_write(struct file *filp, const char __user *buf,
2289e8de3701SAlexandre Bounine 			 size_t count, loff_t *ppos)
2290e8de3701SAlexandre Bounine {
2291e8de3701SAlexandre Bounine 	struct mport_cdev_priv *priv = filp->private_data;
2292e8de3701SAlexandre Bounine 	struct rio_mport *mport = priv->md->mport;
2293e8de3701SAlexandre Bounine 	struct rio_event event;
2294e8de3701SAlexandre Bounine 	int len, ret;
2295e8de3701SAlexandre Bounine 
2296e8de3701SAlexandre Bounine 	if (!count)
2297e8de3701SAlexandre Bounine 		return 0;
2298e8de3701SAlexandre Bounine 
2299e8de3701SAlexandre Bounine 	if (count % sizeof(event))
2300e8de3701SAlexandre Bounine 		return -EINVAL;
2301e8de3701SAlexandre Bounine 
2302e8de3701SAlexandre Bounine 	len = 0;
2303e8de3701SAlexandre Bounine 	while ((count - len) >= (int)sizeof(event)) {
2304e8de3701SAlexandre Bounine 		if (copy_from_user(&event, buf, sizeof(event)))
2305e8de3701SAlexandre Bounine 			return -EFAULT;
2306e8de3701SAlexandre Bounine 
2307e8de3701SAlexandre Bounine 		if (event.header != RIO_DOORBELL)
2308e8de3701SAlexandre Bounine 			return -EINVAL;
2309e8de3701SAlexandre Bounine 
2310e8de3701SAlexandre Bounine 		ret = rio_mport_send_doorbell(mport,
23114e1016daSAlexandre Bounine 					      event.u.doorbell.rioid,
2312e8de3701SAlexandre Bounine 					      event.u.doorbell.payload);
2313e8de3701SAlexandre Bounine 		if (ret < 0)
2314e8de3701SAlexandre Bounine 			return ret;
2315e8de3701SAlexandre Bounine 
2316e8de3701SAlexandre Bounine 		len += sizeof(event);
2317e8de3701SAlexandre Bounine 		buf += sizeof(event);
2318e8de3701SAlexandre Bounine 	}
2319e8de3701SAlexandre Bounine 
2320e8de3701SAlexandre Bounine 	return len;
2321e8de3701SAlexandre Bounine }
2322e8de3701SAlexandre Bounine 
/* File operations for the /dev/rio_mportN character device nodes. */
static const struct file_operations mport_fops = {
	.owner		= THIS_MODULE,
	.open		= mport_cdev_open,
	.release	= mport_cdev_release,
	.poll		= mport_cdev_poll,
	.read		= mport_read,	/* drain queued rio_event records */
	.write		= mport_write,	/* send RIO_DOORBELL events */
	.mmap		= mport_cdev_mmap,
	.fasync		= mport_cdev_fasync,
	.unlocked_ioctl = mport_cdev_ioctl
};
2334e8de3701SAlexandre Bounine 
2335e8de3701SAlexandre Bounine /*
2336e8de3701SAlexandre Bounine  * Character device management
2337e8de3701SAlexandre Bounine  */
2338e8de3701SAlexandre Bounine 
2339e8de3701SAlexandre Bounine static void mport_device_release(struct device *dev)
2340e8de3701SAlexandre Bounine {
2341e8de3701SAlexandre Bounine 	struct mport_dev *md;
2342e8de3701SAlexandre Bounine 
2343e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "%s", dev_name(dev));
2344e8de3701SAlexandre Bounine 	md = container_of(dev, struct mport_dev, dev);
2345e8de3701SAlexandre Bounine 	kfree(md);
2346e8de3701SAlexandre Bounine }
2347e8de3701SAlexandre Bounine 
/*
 * mport_cdev_add() - Create mport_dev from rio_mport
 * @mport:	RapidIO master port
 *
 * Allocates a struct mport_dev, registers its character device node and
 * caches the mport attributes that are later reported to userspace through
 * the mport query interface.  Returns the new mport_dev on success or
 * NULL on any failure.
 */
static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
{
	int ret = 0;
	struct mport_dev *md;
	struct rio_mport_attr attr;

	md = kzalloc(sizeof(*md), GFP_KERNEL);
	if (!md) {
		rmcd_error("Unable allocate a device object");
		return NULL;
	}

	md->mport = mport;
	mutex_init(&md->buf_mutex);
	mutex_init(&md->file_mutex);
	INIT_LIST_HEAD(&md->file_list);

	/*
	 * Set up the embedded struct device; once initialized, md must be
	 * released through put_device() (-> mport_device_release), not kfree.
	 */
	device_initialize(&md->dev);
	md->dev.devt = MKDEV(MAJOR(dev_number), mport->id);
	md->dev.class = dev_class;
	md->dev.parent = &mport->dev;
	md->dev.release = mport_device_release;
	dev_set_name(&md->dev, DEV_NAME "%d", mport->id);
	atomic_set(&md->active, 1);

	cdev_init(&md->cdev, &mport_fops);
	md->cdev.owner = THIS_MODULE;

	/* Makes the device node live: open() may be called after this */
	ret = cdev_device_add(&md->cdev, &md->dev);
	if (ret) {
		rmcd_error("Failed to register mport %d (err=%d)",
		       mport->id, ret);
		goto err_cdev;
	}

	INIT_LIST_HEAD(&md->doorbells);
	spin_lock_init(&md->db_lock);
	INIT_LIST_HEAD(&md->portwrites);
	spin_lock_init(&md->pw_lock);
	INIT_LIST_HEAD(&md->mappings);

	md->properties.id = mport->id;
	md->properties.sys_size = mport->sys_size;
	md->properties.hdid = mport->host_deviceid;
	md->properties.index = mport->index;

	/* The transfer_mode property will be returned through mport query
	 * interface
	 */
#ifdef CONFIG_FSL_RIO /* for now: only on Freescale's SoCs */
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_MAPPED;
#else
	md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
#endif
	/* Attribute query is best-effort: on failure the capability
	 * fields simply remain zero from kzalloc().
	 */
	ret = rio_query_mport(mport, &attr);
	if (!ret) {
		md->properties.flags = attr.flags;
		md->properties.link_speed = attr.link_speed;
		md->properties.link_width = attr.link_width;
		md->properties.dma_max_sge = attr.dma_max_sge;
		md->properties.dma_max_size = attr.dma_max_size;
		md->properties.dma_align = attr.dma_align;
		md->properties.cap_sys_size = 0;
		md->properties.cap_transfer_mode = 0;
		md->properties.cap_addr_size = 0;
	} else
		pr_info(DRV_PREFIX "Failed to obtain info for %s cdev(%d:%d)\n",
			mport->name, MAJOR(dev_number), mport->id);

	mutex_lock(&mport_devs_lock);
	list_add_tail(&md->node, &mport_devs);
	mutex_unlock(&mport_devs_lock);

	pr_info(DRV_PREFIX "Added %s cdev(%d:%d)\n",
		mport->name, MAJOR(dev_number), mport->id);

	return md;

err_cdev:
	/* Drops the initial reference; release callback frees md */
	put_device(&md->dev);
	return NULL;
}
2434e8de3701SAlexandre Bounine 
/*
 * mport_cdev_terminate_dma() - Stop all active DMA data transfers and release
 *                              associated DMA channels.
 */
static void mport_cdev_terminate_dma(struct mport_dev *md)
{
#ifdef CONFIG_RAPIDIO_DMA_ENGINE
	struct mport_cdev_priv *file_priv;

	rmcd_debug(DMA, "%s", dev_name(&md->dev));

	/* First tear down the per-open-file DMA channels ... */
	mutex_lock(&md->file_mutex);
	list_for_each_entry(file_priv, &md->file_list, list) {
		if (!file_priv->dmach)
			continue;
		dmaengine_terminate_all(file_priv->dmach);
		rio_release_dma(file_priv->dmach);
	}
	mutex_unlock(&md->file_mutex);

	/* ... then the device-level channel, if one was acquired */
	if (md->dma_chan) {
		dmaengine_terminate_all(md->dma_chan);
		rio_release_dma(md->dma_chan);
		md->dma_chan = NULL;
	}
#endif
}
2462e8de3701SAlexandre Bounine 
2463e8de3701SAlexandre Bounine 
2464e8de3701SAlexandre Bounine /*
2465e8de3701SAlexandre Bounine  * mport_cdev_kill_fasync() - Send SIGIO signal to all processes with open
2466e8de3701SAlexandre Bounine  *                            mport_cdev files.
2467e8de3701SAlexandre Bounine  */
2468e8de3701SAlexandre Bounine static int mport_cdev_kill_fasync(struct mport_dev *md)
2469e8de3701SAlexandre Bounine {
2470e8de3701SAlexandre Bounine 	unsigned int files = 0;
2471e8de3701SAlexandre Bounine 	struct mport_cdev_priv *client;
2472e8de3701SAlexandre Bounine 
2473e8de3701SAlexandre Bounine 	mutex_lock(&md->file_mutex);
2474e8de3701SAlexandre Bounine 	list_for_each_entry(client, &md->file_list, list) {
2475e8de3701SAlexandre Bounine 		if (client->async_queue)
2476e8de3701SAlexandre Bounine 			kill_fasync(&client->async_queue, SIGIO, POLL_HUP);
2477e8de3701SAlexandre Bounine 		files++;
2478e8de3701SAlexandre Bounine 	}
2479e8de3701SAlexandre Bounine 	mutex_unlock(&md->file_mutex);
2480e8de3701SAlexandre Bounine 	return files;
2481e8de3701SAlexandre Bounine }
2482e8de3701SAlexandre Bounine 
/*
 * mport_cdev_remove() - Remove mport character device
 * @md:	Mport device to remove
 *
 * Teardown order matters here: deactivate, stop DMA, unhook the
 * port-write handler and delete the device node before signalling
 * clients and releasing the remaining resources.
 */
static void mport_cdev_remove(struct mport_dev *md)
{
	struct rio_mport_mapping *map, *_map;

	rmcd_debug(EXIT, "Remove %s cdev", md->mport->name);
	/* Mark inactive so in-flight file operations can bail out */
	atomic_set(&md->active, 0);
	mport_cdev_terminate_dma(md);
	rio_del_mport_pw_handler(md->mport, md, rio_mport_pw_handler);
	cdev_device_del(&md->cdev, &md->dev);
	mport_cdev_kill_fasync(md);

	/* TODO: do we need to give clients some time to close file
	 * descriptors? Simple wait for XX, or kref?
	 */

	/*
	 * Release DMA buffers allocated for the mport device.
	 * Disable associated inbound Rapidio requests mapping if applicable.
	 */
	mutex_lock(&md->buf_mutex);
	list_for_each_entry_safe(map, _map, &md->mappings, node) {
		kref_put(&map->ref, mport_release_mapping);
	}
	mutex_unlock(&md->buf_mutex);

	/* Mappings still referenced elsewhere stay queued; just warn */
	if (!list_empty(&md->mappings))
		rmcd_warn("WARNING: %s pending mappings on removal",
			  md->mport->name);

	rio_release_inb_dbell(md->mport, 0, 0x0fff);

	/* Drop the reference from mport_cdev_add(); may free md */
	put_device(&md->dev);
}
2520e8de3701SAlexandre Bounine 
2521e8de3701SAlexandre Bounine /*
2522e8de3701SAlexandre Bounine  * RIO rio_mport_interface driver
2523e8de3701SAlexandre Bounine  */
2524e8de3701SAlexandre Bounine 
2525e8de3701SAlexandre Bounine /*
2526e8de3701SAlexandre Bounine  * mport_add_mport() - Add rio_mport from LDM device struct
2527e8de3701SAlexandre Bounine  * @dev:		Linux device model struct
2528e8de3701SAlexandre Bounine  * @class_intf:	Linux class_interface
2529e8de3701SAlexandre Bounine  */
2530e8de3701SAlexandre Bounine static int mport_add_mport(struct device *dev,
2531e8de3701SAlexandre Bounine 		struct class_interface *class_intf)
2532e8de3701SAlexandre Bounine {
2533e8de3701SAlexandre Bounine 	struct rio_mport *mport = NULL;
2534e8de3701SAlexandre Bounine 	struct mport_dev *chdev = NULL;
2535e8de3701SAlexandre Bounine 
2536e8de3701SAlexandre Bounine 	mport = to_rio_mport(dev);
2537e8de3701SAlexandre Bounine 	if (!mport)
2538e8de3701SAlexandre Bounine 		return -ENODEV;
2539e8de3701SAlexandre Bounine 
2540e8de3701SAlexandre Bounine 	chdev = mport_cdev_add(mport);
2541e8de3701SAlexandre Bounine 	if (!chdev)
2542e8de3701SAlexandre Bounine 		return -ENODEV;
2543e8de3701SAlexandre Bounine 
2544e8de3701SAlexandre Bounine 	return 0;
2545e8de3701SAlexandre Bounine }
2546e8de3701SAlexandre Bounine 
2547e8de3701SAlexandre Bounine /*
2548e8de3701SAlexandre Bounine  * mport_remove_mport() - Remove rio_mport from global list
2549e8de3701SAlexandre Bounine  * TODO remove device from global mport_dev list
2550e8de3701SAlexandre Bounine  */
2551e8de3701SAlexandre Bounine static void mport_remove_mport(struct device *dev,
2552e8de3701SAlexandre Bounine 		struct class_interface *class_intf)
2553e8de3701SAlexandre Bounine {
2554e8de3701SAlexandre Bounine 	struct rio_mport *mport = NULL;
2555e8de3701SAlexandre Bounine 	struct mport_dev *chdev;
2556e8de3701SAlexandre Bounine 	int found = 0;
2557e8de3701SAlexandre Bounine 
2558e8de3701SAlexandre Bounine 	mport = to_rio_mport(dev);
2559e8de3701SAlexandre Bounine 	rmcd_debug(EXIT, "Remove %s", mport->name);
2560e8de3701SAlexandre Bounine 
2561e8de3701SAlexandre Bounine 	mutex_lock(&mport_devs_lock);
2562e8de3701SAlexandre Bounine 	list_for_each_entry(chdev, &mport_devs, node) {
2563e8de3701SAlexandre Bounine 		if (chdev->mport->id == mport->id) {
2564e8de3701SAlexandre Bounine 			atomic_set(&chdev->active, 0);
2565e8de3701SAlexandre Bounine 			list_del(&chdev->node);
2566e8de3701SAlexandre Bounine 			found = 1;
2567e8de3701SAlexandre Bounine 			break;
2568e8de3701SAlexandre Bounine 		}
2569e8de3701SAlexandre Bounine 	}
2570e8de3701SAlexandre Bounine 	mutex_unlock(&mport_devs_lock);
2571e8de3701SAlexandre Bounine 
2572e8de3701SAlexandre Bounine 	if (found)
2573e8de3701SAlexandre Bounine 		mport_cdev_remove(chdev);
2574e8de3701SAlexandre Bounine }
2575e8de3701SAlexandre Bounine 
/* the rio_mport_interface is used to handle local mport devices */
static struct class_interface rio_mport_interface __refdata = {
	.class		= &rio_mport_class,
	.add_dev	= mport_add_mport,	/* creates the cdev per mport */
	.remove_dev	= mport_remove_mport,	/* tears the cdev down */
};
2582e8de3701SAlexandre Bounine 
2583e8de3701SAlexandre Bounine /*
2584e8de3701SAlexandre Bounine  * Linux kernel module
2585e8de3701SAlexandre Bounine  */
2586e8de3701SAlexandre Bounine 
2587e8de3701SAlexandre Bounine /*
2588e8de3701SAlexandre Bounine  * mport_init - Driver module loading
2589e8de3701SAlexandre Bounine  */
2590e8de3701SAlexandre Bounine static int __init mport_init(void)
2591e8de3701SAlexandre Bounine {
2592e8de3701SAlexandre Bounine 	int ret;
2593e8de3701SAlexandre Bounine 
2594e8de3701SAlexandre Bounine 	/* Create device class needed by udev */
2595e8de3701SAlexandre Bounine 	dev_class = class_create(THIS_MODULE, DRV_NAME);
259699f23c2cSVladimir Zapolskiy 	if (IS_ERR(dev_class)) {
2597e8de3701SAlexandre Bounine 		rmcd_error("Unable to create " DRV_NAME " class");
259899f23c2cSVladimir Zapolskiy 		return PTR_ERR(dev_class);
2599e8de3701SAlexandre Bounine 	}
2600e8de3701SAlexandre Bounine 
2601e8de3701SAlexandre Bounine 	ret = alloc_chrdev_region(&dev_number, 0, RIO_MAX_MPORTS, DRV_NAME);
2602e8de3701SAlexandre Bounine 	if (ret < 0)
2603e8de3701SAlexandre Bounine 		goto err_chr;
2604e8de3701SAlexandre Bounine 
2605e8de3701SAlexandre Bounine 	rmcd_debug(INIT, "Registered class with major=%d", MAJOR(dev_number));
2606e8de3701SAlexandre Bounine 
2607e8de3701SAlexandre Bounine 	/* Register to rio_mport_interface */
2608e8de3701SAlexandre Bounine 	ret = class_interface_register(&rio_mport_interface);
2609e8de3701SAlexandre Bounine 	if (ret) {
2610e8de3701SAlexandre Bounine 		rmcd_error("class_interface_register() failed, err=%d", ret);
2611e8de3701SAlexandre Bounine 		goto err_cli;
2612e8de3701SAlexandre Bounine 	}
2613e8de3701SAlexandre Bounine 
2614e8de3701SAlexandre Bounine 	return 0;
2615e8de3701SAlexandre Bounine 
2616e8de3701SAlexandre Bounine err_cli:
2617e8de3701SAlexandre Bounine 	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
2618e8de3701SAlexandre Bounine err_chr:
2619e8de3701SAlexandre Bounine 	class_destroy(dev_class);
2620e8de3701SAlexandre Bounine 	return ret;
2621e8de3701SAlexandre Bounine }
2622e8de3701SAlexandre Bounine 
/**
 * mport_exit - Driver module unloading
 */
static void __exit mport_exit(void)
{
	/* Unwind in the reverse order of mport_init(): stop receiving
	 * add/remove notifications first, then destroy the class and
	 * release the reserved char-device region.
	 */
	class_interface_unregister(&rio_mport_interface);
	class_destroy(dev_class);
	unregister_chrdev_region(dev_number, RIO_MAX_MPORTS);
}
2632e8de3701SAlexandre Bounine 
/* Wire the module load/unload hooks */
module_init(mport_init);
module_exit(mport_exit);
2635